comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
safeClose with have the null Check | public void readFeedDocumentsAfterSplit(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollectionForSplit = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(2 * LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseMonitorCollection = createLeaseMonitorCollection(LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncClient clientWithStaleCache = null;
try {
clientWithStaleCache = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
CosmosAsyncDatabase databaseFromStaleClient =
clientWithStaleCache.getDatabase(createdFeedCollectionForSplit.getDatabase().getId());
CosmosAsyncContainer feedCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdFeedCollectionForSplit.getId());
CosmosAsyncContainer leaseCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdLeaseCollection.getId());
ChangeFeedProcessor staleChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(feedCollectionFromStaleClient)
.leaseContainer(leaseCollectionFromStaleClient)
.handleAllVersionsAndDeletesChanges(changeFeedProcessorItems -> {})
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false))
.buildChangeFeedProcessor();
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
Set<String> queriedLeaseTokensFromLeaseCollection = ConcurrentHashMap.newKeySet();
LeaseStateMonitor leaseStateMonitor = new LeaseStateMonitor();
ChangeFeedProcessor leaseMonitoringChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleLatestVersionChanges(leasesChangeFeedProcessorHandler(leaseStateMonitor))
.feedContainer(createdLeaseCollection)
.leaseContainer(createdLeaseMonitorCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("MONITOR")
.setStartFromBeginning(true)
.setMaxItemCount(10)
.setLeaseRenewInterval(Duration.ofSeconds(2))
).buildChangeFeedProcessor();
ChangeFeedProcessorBuilder changeFeedProcessorBuilderForFeedMonitoring = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(createdFeedCollectionForSplit)
.leaseContainer(createdLeaseCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false));
if (isContextRequired) {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilderForFeedMonitoring.buildChangeFeedProcessor();
leaseMonitoringChangeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(200 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor for lease monitoring did not start in the expected time", throwable);
return Mono.error(throwable);
})
.then(
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor did not start in the expected time", throwable);
return Mono.error(throwable);
}))
.subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollectionForSplit, FEED_COUNT);
staleChangeFeedProcessor.getCurrentState().block();
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection
.addAll(leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
createdFeedCollectionForSplit
.readThroughput().subscribeOn(Schedulers.boundedElastic())
.flatMap(currentThroughput ->
createdFeedCollectionForSplit
.replaceThroughput(ThroughputProperties.createManualThroughput(FEED_COLLECTION_THROUGHPUT_FOR_SPLIT))
.subscribeOn(Schedulers.boundedElastic())
).subscribe();
long continuationToken = Long.MAX_VALUE;
for (JsonNode item : leaseStateMonitor.receivedLeases.values()) {
JsonNode tempToken = item.get("ContinuationToken");
long continuationTokenValue = 0;
if (tempToken != null && StringUtils.isNotEmpty(tempToken.asText())) {
ChangeFeedState changeFeedState = ChangeFeedState.fromString(tempToken.asText());
continuationTokenValue =
Long.parseLong(changeFeedState.getContinuation().getCurrentContinuationToken().getToken().replace("\"", ""));
}
if (tempToken == null || continuationTokenValue == 0) {
logger.error("Found unexpected lease with continuation token value of null or 0");
try {
logger.info("ERROR LEASE FOUND {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
} catch (JsonProcessingException e) {
logger.error("Failure in processing json [{}]", e.getMessage(), e);
}
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
if (continuationToken > continuationTokenValue) {
continuationToken = continuationTokenValue;
}
}
}
if (continuationToken == Long.MAX_VALUE) {
logger.error("Could not find any valid lease documents");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
leaseStateMonitor.parentContinuationToken = continuationToken;
}
leaseStateMonitor.isAfterLeaseInitialization = true;
String partitionKeyRangesPath = extractContainerSelfLink(createdFeedCollectionForSplit);
AsyncDocumentClient contextClient = getContextClient(createdDatabase);
Flux.just(1).subscribeOn(Schedulers.boundedElastic())
.flatMap(value -> {
logger.warn("Reading current throughput change.");
return contextClient.readPartitionKeyRanges(partitionKeyRangesPath, (CosmosQueryRequestOptions) null);
})
.map(partitionKeyRangeFeedResponse -> {
int count = partitionKeyRangeFeedResponse.getResults().size();
if (count < 2) {
logger.warn("Throughput change is pending.");
throw new RuntimeException("Throughput change is not done.");
}
return count;
})
.retryWhen(Retry.max(40).filter(throwable -> {
try {
logger.warn("Retrying...");
Thread.sleep(10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted exception", e);
}
return true;
}))
.last()
.doOnSuccess(partitionCount -> {
leaseStateMonitor.isAfterSplits = true;
})
.block();
assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
createReadFeedDocuments(createdDocuments, createdFeedCollectionForSplit, FEED_COUNT);
waitToReceiveDocuments(receivedDocuments, 2 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT * 2);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
leaseMonitoringChangeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
int leaseCount = changeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCount > 1).as("Found %d leases", leaseCount).isTrue();
int leaseCountFromStaleCfp = staleChangeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCountFromStaleCfp).isEqualTo(leaseCount);
assertThat(receivedDocuments.size()).isEqualTo(createdDocuments.size());
for (InternalObjectNode item : createdDocuments) {
assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
}
assertThat(leaseStateMonitor.isContinuationTokenAdvancing && leaseStateMonitor.parentContinuationToken > 0)
.as("Continuation tokens for the leases after split should advance from parent value; parent: %d", leaseStateMonitor.parentContinuationToken).isTrue();
leaseDocuments = createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection.addAll(
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
if (isContextRequired) {
assertThat(receivedLeaseTokensFromContext.size())
.isEqualTo(queriedLeaseTokensFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(queriedLeaseTokensFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} finally {
System.out.println("Start to delete FeedCollectionForSplit");
safeDeleteCollection(createdFeedCollectionForSplit);
safeDeleteCollection(createdLeaseCollection);
if (clientWithStaleCache != null) {
safeClose(clientWithStaleCache);
}
Thread.sleep(500);
}
} | safeClose(clientWithStaleCache); | public void readFeedDocumentsAfterSplit(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollectionForSplit = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(2 * LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseMonitorCollection = createLeaseMonitorCollection(LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncClient clientWithStaleCache = null;
try {
clientWithStaleCache = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
CosmosAsyncDatabase databaseFromStaleClient =
clientWithStaleCache.getDatabase(createdFeedCollectionForSplit.getDatabase().getId());
CosmosAsyncContainer feedCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdFeedCollectionForSplit.getId());
CosmosAsyncContainer leaseCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdLeaseCollection.getId());
ChangeFeedProcessor staleChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(feedCollectionFromStaleClient)
.leaseContainer(leaseCollectionFromStaleClient)
.handleAllVersionsAndDeletesChanges(changeFeedProcessorItems -> {})
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false))
.buildChangeFeedProcessor();
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
Set<String> queriedLeaseTokensFromLeaseCollection = ConcurrentHashMap.newKeySet();
LeaseStateMonitor leaseStateMonitor = new LeaseStateMonitor();
ChangeFeedProcessor leaseMonitoringChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleLatestVersionChanges(leasesChangeFeedProcessorHandler(leaseStateMonitor))
.feedContainer(createdLeaseCollection)
.leaseContainer(createdLeaseMonitorCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("MONITOR")
.setStartFromBeginning(true)
.setMaxItemCount(10)
.setLeaseRenewInterval(Duration.ofSeconds(2))
).buildChangeFeedProcessor();
ChangeFeedProcessorBuilder changeFeedProcessorBuilderForFeedMonitoring = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(createdFeedCollectionForSplit)
.leaseContainer(createdLeaseCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false));
if (isContextRequired) {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilderForFeedMonitoring.buildChangeFeedProcessor();
leaseMonitoringChangeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(200 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor for lease monitoring did not start in the expected time", throwable);
return Mono.error(throwable);
})
.then(
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor did not start in the expected time", throwable);
return Mono.error(throwable);
}))
.subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollectionForSplit, FEED_COUNT);
staleChangeFeedProcessor.getCurrentState().block();
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection
.addAll(leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
createdFeedCollectionForSplit
.readThroughput().subscribeOn(Schedulers.boundedElastic())
.flatMap(currentThroughput ->
createdFeedCollectionForSplit
.replaceThroughput(ThroughputProperties.createManualThroughput(FEED_COLLECTION_THROUGHPUT_FOR_SPLIT))
.subscribeOn(Schedulers.boundedElastic())
).subscribe();
long continuationToken = Long.MAX_VALUE;
for (JsonNode item : leaseStateMonitor.receivedLeases.values()) {
JsonNode tempToken = item.get("ContinuationToken");
long continuationTokenValue = 0;
if (tempToken != null && StringUtils.isNotEmpty(tempToken.asText())) {
ChangeFeedState changeFeedState = ChangeFeedState.fromString(tempToken.asText());
continuationTokenValue =
Long.parseLong(changeFeedState.getContinuation().getCurrentContinuationToken().getToken().replace("\"", ""));
}
if (tempToken == null || continuationTokenValue == 0) {
logger.error("Found unexpected lease with continuation token value of null or 0");
try {
logger.info("ERROR LEASE FOUND {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
} catch (JsonProcessingException e) {
logger.error("Failure in processing json [{}]", e.getMessage(), e);
}
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
if (continuationToken > continuationTokenValue) {
continuationToken = continuationTokenValue;
}
}
}
if (continuationToken == Long.MAX_VALUE) {
logger.error("Could not find any valid lease documents");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
leaseStateMonitor.parentContinuationToken = continuationToken;
}
leaseStateMonitor.isAfterLeaseInitialization = true;
String partitionKeyRangesPath = extractContainerSelfLink(createdFeedCollectionForSplit);
AsyncDocumentClient contextClient = getContextClient(createdDatabase);
Flux.just(1).subscribeOn(Schedulers.boundedElastic())
.flatMap(value -> {
logger.warn("Reading current throughput change.");
return contextClient.readPartitionKeyRanges(partitionKeyRangesPath, (CosmosQueryRequestOptions) null);
})
.map(partitionKeyRangeFeedResponse -> {
int count = partitionKeyRangeFeedResponse.getResults().size();
if (count < 2) {
logger.warn("Throughput change is pending.");
throw new RuntimeException("Throughput change is not done.");
}
return count;
})
.retryWhen(Retry.max(40).filter(throwable -> {
try {
logger.warn("Retrying...");
Thread.sleep(10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted exception", e);
}
return true;
}))
.last()
.doOnSuccess(partitionCount -> {
leaseStateMonitor.isAfterSplits = true;
})
.block();
assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
createReadFeedDocuments(createdDocuments, createdFeedCollectionForSplit, FEED_COUNT);
waitToReceiveDocuments(receivedDocuments, 2 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT * 2);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
leaseMonitoringChangeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
int leaseCount = changeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCount > 1).as("Found %d leases", leaseCount).isTrue();
int leaseCountFromStaleCfp = staleChangeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCountFromStaleCfp).isEqualTo(leaseCount);
assertThat(receivedDocuments.size()).isEqualTo(createdDocuments.size());
for (InternalObjectNode item : createdDocuments) {
assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
}
assertThat(leaseStateMonitor.isContinuationTokenAdvancing && leaseStateMonitor.parentContinuationToken > 0)
.as("Continuation tokens for the leases after split should advance from parent value; parent: %d", leaseStateMonitor.parentContinuationToken).isTrue();
leaseDocuments = createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection.addAll(
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
if (isContextRequired) {
assertThat(receivedLeaseTokensFromContext.size())
.isEqualTo(queriedLeaseTokensFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(queriedLeaseTokensFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} finally {
System.out.println("Start to delete FeedCollectionForSplit");
safeDeleteCollection(createdFeedCollectionForSplit);
safeDeleteCollection(createdLeaseCollection);
safeClose(clientWithStaleCache);
Thread.sleep(500);
}
} | class FullFidelityChangeFeedProcessorTest extends TestSuiteBase {
private final static Logger log = LoggerFactory.getLogger(FullFidelityChangeFeedProcessorTest.class);
private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper();
private CosmosAsyncDatabase createdDatabase;
private final String hostName = RandomStringUtils.randomAlphabetic(6);
private final int FEED_COUNT = 10;
private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000;
private final int FEED_COLLECTION_THROUGHPUT = 400;
private final int FEED_COLLECTION_THROUGHPUT_FOR_SPLIT = 10100;
private final int LEASE_COLLECTION_THROUGHPUT = 400;
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuilders")
public FullFidelityChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@DataProvider
public Object[] contextTestConfigs() {
return new Object[] {true, false};
}
@Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void fullFidelityChangeFeedProcessorStartFromNow(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
.options(changeFeedProcessorOptions)
.hostName(hostName)
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection);
if (isContextRequired) {
changeFeedProcessorBuilder = changeFeedProcessorBuilder
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilder = changeFeedProcessorBuilder
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
try {
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
logger.info("Starting ChangeFeed processor");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Finished starting ChangeFeed processor");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
logger.info("Set up read feed documents");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Validating changes now");
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
List<String> leaseTokensCollectedFromLeaseCollection =
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
if (isContextRequired) {
assertThat(leaseTokensCollectedFromLeaseCollection).isNotNull();
assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void fullFidelityChangeFeedProcessorStartFromContinuationToken(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
.options(changeFeedProcessorOptions)
.hostName(hostName)
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection);
if (isContextRequired) {
changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
try {
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
logger.info("Starting ChangeFeed processor");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Finished starting ChangeFeed processor");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
logger.info("Set up read feed documents");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Validating changes now");
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
List<String> leaseTokensCollectedFromLeaseCollection =
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
if (isContextRequired) {
assertThat(leaseTokensCollectedFromLeaseCollection).isNotNull();
assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentState() throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
log.info("START processing from thread {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
log.info("END processing from thread {}", Thread.currentThread().getId());
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
.hostName("side-cart")
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
fail("ERROR - we should not execute this handler");
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
try {
changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.then(Mono.just(changeFeedProcessorMain)
.delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.flatMap(value -> changeFeedProcessorMain.stop()
.subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
))
.subscribe();
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentState.size()).isNotZero().as("Change Feed Processor number of leases should not be 0.");
int totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag at start");
List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag at start");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Main estimated total lag");
cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Side Cart estimated total lag");
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentStateWithInsertedDocuments() throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
log.info("START processing from thread {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
log.info("END processing from thread {}", Thread.currentThread().getId());
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
.hostName("side-cart")
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
fail("ERROR - we should not execute this handler");
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
try {
changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentState.size()).isNotZero().as("Change Feed Processor number of leases should not be 0.");
int totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag at start");
List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag at start");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info("Current state of main after inserting documents is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag");
cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info("Current state of side cart after inserting documents is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag");
changeFeedProcessorMain.stop().subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info("Current state of main after stopping is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Main estimated total lag");
cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info("Current state of side cart after stopping is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Side Cart estimated total lag");
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void staledLeaseAcquiring() throws InterruptedException {
final String ownerFirst = "Owner_First";
final String ownerSecond = "Owner_Second";
final String leasePrefix = "TEST";
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
.hostName(ownerFirst)
.handleAllVersionsAndDeletesChanges(docs -> {
log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix(leasePrefix)
)
.buildChangeFeedProcessor();
ChangeFeedProcessor changeFeedProcessorSecond = new ChangeFeedProcessorBuilder()
.hostName(ownerSecond)
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.options(new ChangeFeedProcessorOptions()
.setLeaseRenewInterval(Duration.ofSeconds(10))
.setLeaseAcquireInterval(Duration.ofSeconds(5))
.setLeaseExpirationInterval(Duration.ofSeconds(20))
.setFeedPollDelay(Duration.ofSeconds(2))
.setLeasePrefix(leasePrefix)
.setMaxItemCount(10)
.setMaxScaleCount(0)
)
.buildChangeFeedProcessor();
changeFeedProcessorFirst
.start()
.subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.then(Mono.just(changeFeedProcessorFirst)
.delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.flatMap(value ->
changeFeedProcessorFirst.stop()
.subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
))
.subscribe();
try {
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted exception", e);
}
log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
SqlParameter param = new SqlParameter();
param.setName("@PartitionLeasePrefix");
param.setValue(leasePrefix);
SqlQuerySpec querySpec = new SqlQuerySpec(
"SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
createdLeaseCollection
.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
.flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
.flatMap(doc -> {
ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
leaseDocument.setOwner("TEMP_OWNER");
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
.map(CosmosItemResponse::getItem);
})
.map(leaseDocument -> {
log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner());
return leaseDocument;
})
.blockLast();
changeFeedProcessorSecond
.start()
.subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
List<InternalObjectNode> docDefList = new ArrayList<>();
for (int i = 0; i < FEED_COUNT; i++) {
docDefList.add(getDocumentDefinition());
}
bulkInsert(createdFeedCollection, docDefList, FEED_COUNT).blockLast();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
long remainingWork = 10 * CHANGE_FEED_PROCESSOR_TIMEOUT;
while (remainingWork > 0 && changeFeedProcessorFirst.isStarted() && !changeFeedProcessorSecond.isStarted()) {
remainingWork -= 100;
Thread.sleep(100);
}
assertThat(changeFeedProcessorSecond.isStarted()).as("Change Feed Processor instance is running").isTrue();
waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
changeFeedProcessorSecond.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("DONE");
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
    // Disabled test: verifies that a processor re-acquires leases whose Owner field
    // has been reset to null while the processor is running, and keeps processing
    // documents afterwards. The statement ordering in the reactive chain below is
    // deliberate and timing-sensitive.
    @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
    public void ownerNullAcquiring() throws InterruptedException {
        final String ownerFirst = "Owner_First";
        final String leasePrefix = "TEST";
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            // NOTE(review): createdDocuments is never populated or read in this test —
            // candidate for removal; kept to avoid touching code in a doc-only pass.
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            // Long renew/acquire/expiration intervals keep the processor from
            // re-acquiring the lease on its own timetable during the test window.
            ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
                .hostName(ownerFirst)
                .handleAllVersionsAndDeletesChanges(docs -> {
                    logger.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                    for (ChangeFeedProcessorItem item : docs) {
                        // Throttle per-item processing so leases stay busy while
                        // ownership is being revoked underneath the processor.
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException ignored) {
                        }
                        processItem(item, receivedDocuments);
                    }
                    logger.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                })
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection)
                .options(new ChangeFeedProcessorOptions()
                    .setLeasePrefix(leasePrefix)
                    .setLeaseRenewInterval(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .setLeaseAcquireInterval(Duration.ofMillis(5 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .setLeaseExpirationInterval(Duration.ofMillis(6 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .setFeedPollDelay(Duration.ofSeconds(5))
                )
                .buildChangeFeedProcessor();
            try {
                logger.info("Start more creating documents");
                List<InternalObjectNode> docDefList = new ArrayList<>();
                for (int i = 0; i < FEED_COUNT; i++) {
                    docDefList.add(getDocumentDefinition());
                }
                // Pipeline: insert docs -> start processor -> null out the lease
                // Owner -> insert more docs, all chained off the reactive stream.
                bulkInsert(createdFeedCollection, docDefList, FEED_COUNT)
                    .last()
                    .flatMap(cosmosItemResponse -> {
                        logger.info("Start first Change feed processor");
                        return changeFeedProcessorFirst.start().subscribeOn(Schedulers.boundedElastic())
                            .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT));
                    })
                    .then(
                        Mono.just(changeFeedProcessorFirst)
                            .flatMap( value -> {
                                logger.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
                                // Give the processor time to acquire its leases first.
                                try {
                                    Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
                                } catch (InterruptedException ignored) {
                                }
                                logger.info("QueryItems before Change feed processor processing");
                                SqlParameter param1 = new SqlParameter();
                                param1.setName("@PartitionLeasePrefix");
                                param1.setValue(leasePrefix);
                                SqlParameter param2 = new SqlParameter();
                                param2.setName("@Owner");
                                param2.setValue(ownerFirst);
                                SqlQuerySpec querySpec = new SqlQuerySpec(
                                    "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix) AND c.Owner=@Owner", Arrays.asList(param1, param2));
                                CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
                                // Strip ownership from every lease held by ownerFirst.
                                return createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
                                    .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.getResults()))
                                    .flatMap(doc -> {
                                        ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                                        leaseDocument.setOwner(null);
                                        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                                        return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                                            .map(CosmosItemResponse::getItem);
                                    })
                                    .map(leaseDocument -> {
                                        logger.info("QueryItems after Change feed processor processing; current Owner is'{}'", leaseDocument.getOwner());
                                        return leaseDocument;
                                    })
                                    .last()
                                    .flatMap(leaseDocument -> {
                                        // With ownerless leases in place, insert another
                                        // batch that the processor must still pick up.
                                        logger.info("Start creating more documents");
                                        List<InternalObjectNode> docDefList1 = new ArrayList<>();
                                        for (int i = 0; i < FEED_COUNT; i++) {
                                            docDefList1.add(getDocumentDefinition());
                                        }
                                        return bulkInsert(createdFeedCollection, docDefList1, FEED_COUNT)
                                            .last();
                                    });
                            }))
                    .subscribe();
            } catch (Exception ex) {
                log.error("First change feed processor did not start in the expected time", ex);
                throw ex;
            }
            // Poll until the processor reports started (or the budget runs out).
            long remainingWork = 20 * CHANGE_FEED_PROCESSOR_TIMEOUT;
            while (remainingWork > 0 && !changeFeedProcessorFirst.isStarted()) {
                remainingWork -= 100;
                Thread.sleep(100);
            }
            waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
            assertThat(changeFeedProcessorFirst.isStarted()).as("Change Feed Processor instance is running").isTrue();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            changeFeedProcessorFirst.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // Allow lease release/deletion traffic to settle before the next test.
            Thread.sleep(500);
        }
    }
    // Disabled test: overwrites every lease's Owner with a random (non-existent) host
    // and verifies that the running processor — configured with aggressive lease
    // intervals — reclaims the leases and keeps processing new documents.
    @Test(groups = { "emulator" }, timeOut = 20 * TIMEOUT, enabled = false)
    public void inactiveOwnersRecovery() throws InterruptedException {
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            String leasePrefix = "TEST";
            // Very short renew/acquire/expiration intervals so stolen leases expire
            // and are re-acquired within a few seconds.
            ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
                .hostName(hostName)
                .handleAllVersionsAndDeletesChanges(fullFidelityChangeFeedProcessorHandler(receivedDocuments))
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection)
                .options(new ChangeFeedProcessorOptions()
                    .setLeaseRenewInterval(Duration.ofSeconds(1))
                    .setLeaseAcquireInterval(Duration.ofSeconds(1))
                    .setLeaseExpirationInterval(Duration.ofSeconds(5))
                    .setFeedPollDelay(Duration.ofSeconds(1))
                    .setLeasePrefix(leasePrefix)
                    .setMaxItemCount(100)
                    .setMaxScaleCount(0)
                    .setScheduler(Schedulers.newParallel("CFP parallel",
                        10 * Schedulers.DEFAULT_POOL_SIZE,
                        true))
                )
                .buildChangeFeedProcessor();
            try {
                changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                    .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .subscribe();
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            } catch (Exception ex) {
                log.error("Change feed processor did not start in the expected time", ex);
                throw ex;
            }
            // First round: processor healthy, documents flow through normally.
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments,2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
            // Assign every lease to a random owner that will never renew it.
            log.info("Update leases with random owners");
            SqlParameter param1 = new SqlParameter();
            param1.setName("@PartitionLeasePrefix");
            param1.setValue(leasePrefix);
            SqlQuerySpec querySpec = new SqlQuerySpec(
                "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Arrays.asList(param1));
            CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
            createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
                .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
                .flatMap(doc -> {
                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                    leaseDocument.setOwner(RandomStringUtils.randomAlphabetic(10));
                    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                    return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                        .map(CosmosItemResponse::getItem);
                })
                // Read back each lease to log the owner that was actually persisted.
                .flatMap(leaseDocument -> createdLeaseCollection.readItem(leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), InternalObjectNode.class))
                .map(doc -> {
                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc.getItem());
                    log.info("Change feed processor current Owner is'{}'", leaseDocument.getOwner());
                    return leaseDocument;
                })
                .blockLast();
            // Second round: after the fake owners' leases expire, the processor must
            // recover ownership and process the new batch end-to-end.
            createdDocuments.clear();
            receivedDocuments.clear();
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // Allow lease release/deletion traffic to settle before the next test.
            Thread.sleep(500);
        }
    }
    // Verifies that a client-level end-to-end operation timeout of 1 ms is suppressed
    // (ignored) for change-feed-processor internal operations: with the aggressive
    // timeout configured, the processor must still start, process documents, and stop.
    @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
    public void endToEndTimeoutConfigShouldBeSuppressed() throws InterruptedException {
        CosmosAsyncClient clientWithE2ETimeoutConfig = null;
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            // 1 ms end-to-end latency budget: any operation honoring it would fail.
            clientWithE2ETimeoutConfig = this.getClientBuilder()
                .endToEndOperationLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofMillis(1)).build())
                .contentResponseOnWriteEnabled(true)
                .buildAsyncClient();
            // Containers resolved through the timeout-configured client, so the
            // processor's own reads/writes go through that policy.
            CosmosAsyncDatabase testDatabase = clientWithE2ETimeoutConfig.getDatabase(this.createdDatabase.getId());
            CosmosAsyncContainer createdFeedCollectionDuplicate = testDatabase.getContainer(createdFeedCollection.getId());
            CosmosAsyncContainer createdLeaseCollectionDuplicate = testDatabase.getContainer(createdLeaseCollection.getId());
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
            ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
                .options(changeFeedProcessorOptions)
                .hostName(hostName)
                .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                    log.info("START processing from thread {}", Thread.currentThread().getId());
                    for (ChangeFeedProcessorItem item : docs) {
                        processItem(item, receivedDocuments);
                    }
                    log.info("END processing from thread {}", Thread.currentThread().getId());
                })
                .feedContainer(createdFeedCollectionDuplicate)
                .leaseContainer(createdLeaseCollectionDuplicate)
                .buildChangeFeedProcessor();
            try {
                changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                    .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .subscribe();
                logger.info("Starting ChangeFeed processor");
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Finished starting ChangeFeed processor");
                // Documents are inserted via the regular client (no 1 ms budget).
                setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
                logger.info("Set up read feed documents");
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Validating changes now");
                validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            } catch (Exception ex) {
                log.error("Change feed processor did not start and stopped in the expected time", ex);
                throw ex;
            }
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // safeClose must tolerate null (client creation may have failed above).
            safeClose(clientWithE2ETimeoutConfig);
            Thread.sleep(500);
        }
    }
@Test(groups = { "cfp-split" }, dataProvider = "contextTestConfigs", timeOut = 160 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
private Consumer<List<ChangeFeedProcessorItem>> changeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
return docs -> {
logger.info("START processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
logger.info("END processing from thread {}", Thread.currentThread().getId());
};
}
private BiConsumer<List<ChangeFeedProcessorItem>, ChangeFeedProcessorContext> changeFeedProcessorHandlerWithContext(
Map<String, ChangeFeedProcessorItem> receivedDocuments, Set<String> receivedLeaseTokensFromContext) {
return (docs, context) -> {
logger.info("START processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
validateChangeFeedProcessorContext(context);
processChangeFeedProcessorContext(context, receivedLeaseTokensFromContext);
logger.info("END processing from thread {}", Thread.currentThread().getId());
};
}
void validateChangeFeedProcessing(ChangeFeedProcessor changeFeedProcessor, List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, int sleepTime) throws InterruptedException {
assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessor
.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
})
.block();
assertThat(cfpCurrentState).isNotNull().as("Change Feed Processor current state");
for (ChangeFeedProcessorState item : cfpCurrentState) {
assertThat(item.getHostName()).isEqualTo(hostName).as("Change Feed Processor ownership");
}
assertThat(receivedDocuments.size()).isEqualTo(FEED_COUNT);
for (InternalObjectNode item : createdDocuments) {
assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
}
}
void validateChangeFeedProcessorContext(ChangeFeedProcessorContext changeFeedProcessorContext) {
String leaseToken = changeFeedProcessorContext.getLeaseToken();
assertThat(leaseToken).isNotNull();
}
private Consumer<List<ChangeFeedProcessorItem>> fullFidelityChangeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
return docs -> {
log.info("START processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
log.info("END processing from thread {}", Thread.currentThread().getId());
};
}
private void waitToReceiveDocuments(Map<String, ChangeFeedProcessorItem> receivedDocuments, long timeoutInMillisecond, long count) throws InterruptedException {
long remainingWork = timeoutInMillisecond;
while (remainingWork > 0 && receivedDocuments.size() < count) {
remainingWork -= 100;
Thread.sleep(200);
}
assertThat(remainingWork > 0).as("Failed to receive all the feed documents").isTrue();
}
    // Creates the shared async client and resolves the shared database once per class.
    @BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT)
    public void before_ChangeFeedProcessorTest() {
        client = getClientBuilder().buildAsyncClient();
        createdDatabase = getSharedCosmosDatabase(client);
    }
    // Releases the shared client. `client` can still be null if class setup failed
    // (alwaysRun = true), so safeClose is expected to null-check its argument.
    @AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeClose(client);
    }
private void setupReadFeedDocuments(List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, CosmosAsyncContainer feedCollection, long count) {
List<InternalObjectNode> docDefList = new ArrayList<>();
for(int i = 0; i < count; i++) {
InternalObjectNode item = getDocumentDefinition();
docDefList.add(item);
logger.info("Adding the following item to bulk list: {}", item);
}
createdDocuments.addAll(bulkInsertBlocking(feedCollection, docDefList));
waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
private void createReadFeedDocuments(List<InternalObjectNode> createdDocuments, CosmosAsyncContainer feedCollection, long count) {
List<InternalObjectNode> docDefList = new ArrayList<>();
for(int i = 0; i < count; i++) {
docDefList.add(getDocumentDefinition());
}
createdDocuments.addAll(bulkInsertBlocking(feedCollection, docDefList));
waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
private InternalObjectNode getDocumentDefinition() {
String uuid = UUID.randomUUID().toString();
InternalObjectNode doc = new InternalObjectNode(String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, uuid, uuid));
return doc;
}
private CosmosAsyncContainer createFeedCollection(int provisionedThroughput) {
CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions();
return createCollection(createdDatabase, getCollectionDefinitionWithFullFidelity(), optionsFeedCollection, provisionedThroughput);
}
private CosmosAsyncContainer createLeaseCollection(int provisionedThroughput) {
CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(
"leases_" + UUID.randomUUID(),
"/id");
return createCollection(createdDatabase, collectionDefinition, options, provisionedThroughput);
}
private CosmosAsyncContainer createLeaseMonitorCollection(int provisionedThroughput) {
CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(
"monitor_" + UUID.randomUUID(),
"/id");
return createCollection(createdDatabase, collectionDefinition, options, provisionedThroughput);
}
private Consumer<List<ChangeFeedProcessorItem>> leasesChangeFeedProcessorHandler(LeaseStateMonitor leaseStateMonitor) {
return docs -> {
log.info("LEASES processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
try {
log
.debug("LEASE RECEIVED {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
} catch (JsonProcessingException e) {
log.error("Failure in processing json [{}]", e.getMessage(), e);
}
JsonNode leaseToken = item.getCurrent().get("LeaseToken");
if (leaseToken != null) {
JsonNode continuationTokenNode = item.getCurrent().get("ContinuationToken");
if (continuationTokenNode == null) {
log.error("Found invalid lease document");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
log.info("LEASE {} with continuation {}", leaseToken.asText(), continuationTokenNode.asText());
if (leaseStateMonitor.isAfterLeaseInitialization) {
String value = continuationTokenNode.asText().replaceAll("[^0-9]", "");
if (value.isEmpty()) {
log.error("Found unexpected continuation token that does not conform to the expected format");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
long continuationToken = Long.parseLong(value);
if (leaseStateMonitor.parentContinuationToken > continuationToken) {
log.error("Found unexpected continuation token that did not advance after the split; parent: {}, current: {}");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
}
}
leaseStateMonitor.receivedLeases.put(item.getCurrent().get("id").asText(), item.getCurrent());
}
}
log.info("LEASES processing from thread {}", Thread.currentThread().getId());
};
}
private static synchronized void processItem(ChangeFeedProcessorItem item, Map<String, ChangeFeedProcessorItem> receivedDocuments) {
log.info("RECEIVED {}", item);
receivedDocuments.put(item.getCurrent().get("id").asText(), item);
}
private static synchronized void processChangeFeedProcessorContext(
ChangeFeedProcessorContext context,
Set<String> receivedLeaseTokens) {
if (context == null) {
fail("The context cannot be null.");
}
if (context.getLeaseToken() == null || context.getLeaseToken().isEmpty()) {
fail("The lease token cannot be null or empty.");
}
receivedLeaseTokens.add(context.getLeaseToken());
}
    // Mutable observer state shared between the lease-monitor handler and the test
    // body; fields are volatile/concurrent because handler threads write while the
    // test thread reads.
    class LeaseStateMonitor {
        // Latest lease document seen, keyed by lease document id.
        public Map<String, JsonNode> receivedLeases = new ConcurrentHashMap<>();
        // Set by the test once lease initialization completed; enables the
        // continuation-token advancement checks in the handler.
        public volatile boolean isAfterLeaseInitialization = false;
        public volatile boolean isAfterSplits = false;
        // Continuation token observed on the parent partition before the split.
        public volatile long parentContinuationToken = 0;
        // Cleared by the handler if any lease fails to advance its continuation token.
        public volatile boolean isContinuationTokenAdvancing = true;
    }
} | class FullFidelityChangeFeedProcessorTest extends TestSuiteBase {
    private final static Logger log = LoggerFactory.getLogger(FullFidelityChangeFeedProcessorTest.class);
    private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper();
    private CosmosAsyncDatabase createdDatabase;
    // Random host name so concurrent test runs do not contend for the same leases.
    private final String hostName = RandomStringUtils.randomAlphabetic(6);
    // Number of documents inserted per ingestion round.
    private final int FEED_COUNT = 10;
    // Base wait unit (ms) used to size start/stop/poll sleeps throughout the tests.
    private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000;
    private final int FEED_COLLECTION_THROUGHPUT = 400;
    // High enough RU/s to provision a multi-partition container for split tests.
    private final int FEED_COLLECTION_THROUGHPUT_FOR_SPLIT = 10100;
    private final int LEASE_COLLECTION_THROUGHPUT = 400;
    private CosmosAsyncClient client;
    // TestNG factory: instantiates the suite once per configured client builder.
    @Factory(dataProvider = "clientBuilders")
    public FullFidelityChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }
@DataProvider
public Object[] contextTestConfigs() {
return new Object[] {true, false};
}
    // End-to-end smoke test: start an all-versions-and-deletes processor "from now",
    // ingest documents, verify the handler received them all, and (when the context
    // handler is used) verify the lease tokens observed via the context match the
    // lease documents actually persisted in the lease container.
    @Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
    public void fullFidelityChangeFeedProcessorStartFromNow(boolean isContextRequired) throws InterruptedException {
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
            ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
            ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
                .options(changeFeedProcessorOptions)
                .hostName(hostName)
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection);
            // Pick the handler variant according to the data-provider flag.
            if (isContextRequired) {
                changeFeedProcessorBuilder = changeFeedProcessorBuilder
                    .handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
            } else {
                changeFeedProcessorBuilder = changeFeedProcessorBuilder
                    .handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
            }
            ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
            try {
                changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                    .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .subscribe();
                logger.info("Starting ChangeFeed processor");
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Finished starting ChangeFeed processor");
                setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
                logger.info("Set up read feed documents");
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Validating changes now");
                validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
                // Fetch the lease documents (excluding the ".info" bookkeeping doc)
                // to cross-check against the context-reported lease tokens.
                // NOTE(review): blockFirst() can return null for an empty pipeline,
                // which would NPE on getResults() — confirm intended for a test.
                String leaseQuery = "select * from c where not contains(c.id, \"info\")";
                List<JsonNode> leaseDocuments =
                    createdLeaseCollection
                        .queryItems(leaseQuery, JsonNode.class)
                        .byPage()
                        .blockFirst()
                        .getResults();
                List<String> leaseTokensCollectedFromLeaseCollection =
                    leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
                if (isContextRequired) {
                    // Every lease token seen via the context must match a persisted
                    // lease, one-to-one.
                    assertThat(leaseTokensCollectedFromLeaseCollection).isNotNull();
                    assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
                    assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
                }
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            } catch (Exception ex) {
                log.error("Change feed processor did not start and stopped in the expected time", ex);
                throw ex;
            }
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            Thread.sleep(500);
        }
    }
@Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void fullFidelityChangeFeedProcessorStartFromContinuationToken(boolean isContextRequired) throws InterruptedException {
    // Runs a full-fidelity (all versions and deletes) change feed processor, feeds FEED_COUNT
    // documents, and validates that every change is observed. When isContextRequired is true,
    // the context-aware handler is used and the lease tokens surfaced through the
    // ChangeFeedProcessorContext are compared against the leases persisted in the lease container.
    // NOTE(review): despite the name, no explicit continuation token is configured here —
    // default ChangeFeedProcessorOptions are used; confirm intent against the non-visible siblings.
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
        ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
        ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
            .options(changeFeedProcessorOptions)
            .hostName(hostName)
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection);
        if (isContextRequired) {
            changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
                changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
        } else {
            changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
                changeFeedProcessorHandler(receivedDocuments));
        }
        ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
        try {
            changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
            logger.info("Starting ChangeFeed processor");
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Finished starting ChangeFeed processor");
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            logger.info("Set up read feed documents");
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Validating changes now");
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
            // Leases have ids like "<prefix>..<leaseToken>"; the ".info" document is bookkeeping, not a lease.
            String leaseQuery = "select * from c where not contains(c.id, \"info\")";
            List<JsonNode> leaseDocuments =
                createdLeaseCollection
                    .queryItems(leaseQuery, JsonNode.class)
                    .byPage()
                    .blockFirst()
                    .getResults();
            // FIX: assert non-null BEFORE dereferencing in the stream below. The original asserted
            // the derived list after the map/collect had already run, so a null result would have
            // surfaced as an NPE instead of a descriptive assertion failure.
            assertThat(leaseDocuments).as("lease documents queried from the lease container").isNotNull();
            List<String> leaseTokensCollectedFromLeaseCollection =
                leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
            if (isContextRequired) {
                // Every lease persisted in the lease container should have been observed through the context.
                assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
                assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
            }
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        Thread.sleep(500);
    }
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentState() throws InterruptedException {
    // Validates ChangeFeedProcessor.getCurrentState(): before any documents are written the
    // estimated lag must be 0, and after FEED_COUNT inserts (with the main processor already
    // stopped) the lag must equal FEED_COUNT. The "side-cart" processor is never started; its
    // handler must never fire, but getCurrentState() must still report leases and lag.
    //
    // FIX: AssertJ's as()/describedAs() must be called BEFORE the verification method
    // (isNotZero/isEqualTo); when chained after it, the description is silently discarded.
    // All assertions below were reordered accordingly (no behavior change on the pass path).
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {}", Thread.currentThread().getId());
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
            .hostName("side-cart")
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                fail("ERROR - we should not execute this handler");
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        try {
            // Start the main processor, then stop it again after a delay so that the later
            // inserts accumulate as lag instead of being consumed.
            changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .then(Mono.just(changeFeedProcessorMain)
                    .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .flatMap(value -> changeFeedProcessorMain.stop()
                        .subscribeOn(Schedulers.boundedElastic())
                        .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    ))
                .subscribe();
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
        Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentState.size()).as("Change Feed Processor number of leases should not be 0.").isNotZero();
        int totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag at start").isEqualTo(0);
        List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag at start").isEqualTo(0);
        // Insert documents while the main processor is stopped; lag should now equal FEED_COUNT.
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag").isEqualTo(FEED_COUNT);
        cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag").isEqualTo(FEED_COUNT);
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        Thread.sleep(500);
    }
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentStateWithInsertedDocuments() throws InterruptedException {
    // Validates getCurrentState() while the main processor is RUNNING: inserted documents are
    // consumed, so lag stays 0; after the main processor is stopped, further inserts accumulate
    // and lag must equal FEED_COUNT. The "side-cart" processor is never started and its handler
    // must never fire.
    //
    // FIX: AssertJ's as()/describedAs() must precede the verification method (isNotZero/isEqualTo);
    // when chained after it the description is silently discarded. All assertions below were
    // reordered accordingly (no behavior change on the pass path).
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {}", Thread.currentThread().getId());
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
            .hostName("side-cart")
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                fail("ERROR - we should not execute this handler");
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        try {
            changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
        Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        // Baseline: no documents written yet, so lag must be 0 for both processors.
        List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentState.size()).as("Change Feed Processor number of leases should not be 0.").isNotZero();
        int totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag at start").isEqualTo(0);
        List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag at start").isEqualTo(0);
        // Insert while the main processor is running: the changes are consumed, so lag stays 0.
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of main after inserting documents is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag").isEqualTo(0);
        cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of side cart after inserting documents is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag").isEqualTo(0);
        // Stop the main processor, insert again: these changes are NOT consumed, so lag = FEED_COUNT.
        changeFeedProcessorMain.stop().subscribe();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of main after stopping is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag").isEqualTo(FEED_COUNT);
        cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of side cart after stopping is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag").isEqualTo(FEED_COUNT);
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        Thread.sleep(500);
    }
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void staledLeaseAcquiring() throws InterruptedException {
    // Simulates stale lease ownership: the first processor runs briefly and stops, the leases
    // are then rewritten with a bogus owner ("TEMP_OWNER"), and a second processor with short
    // renew/acquire/expiration intervals must steal the expired leases and process all documents.
    //
    // FIX: the InterruptedException handler now restores the thread's interrupt status before
    // rethrowing, per standard interruption-handling practice.
    final String ownerFirst = "Owner_First";
    final String ownerSecond = "Owner_Second";
    final String leasePrefix = "TEST";
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
            .hostName(ownerFirst)
            .handleAllVersionsAndDeletesChanges(docs -> {
                log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .options(new ChangeFeedProcessorOptions()
                .setLeasePrefix(leasePrefix)
            )
            .buildChangeFeedProcessor();
        ChangeFeedProcessor changeFeedProcessorSecond = new ChangeFeedProcessorBuilder()
            .hostName(ownerSecond)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            // Short intervals so the second processor detects the expired "TEMP_OWNER" leases quickly.
            .options(new ChangeFeedProcessorOptions()
                .setLeaseRenewInterval(Duration.ofSeconds(10))
                .setLeaseAcquireInterval(Duration.ofSeconds(5))
                .setLeaseExpirationInterval(Duration.ofSeconds(20))
                .setFeedPollDelay(Duration.ofSeconds(2))
                .setLeasePrefix(leasePrefix)
                .setMaxItemCount(10)
                .setMaxScaleCount(0)
            )
            .buildChangeFeedProcessor();
        // Start the first processor and schedule its stop after a delay so the leases exist
        // but are no longer actively renewed.
        changeFeedProcessorFirst
            .start()
            .subscribeOn(Schedulers.boundedElastic())
            .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
            .then(Mono.just(changeFeedProcessorFirst)
                .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .flatMap(value ->
                    changeFeedProcessorFirst.stop()
                        .subscribeOn(Schedulers.boundedElastic())
                        .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                ))
            .subscribe();
        try {
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers/frameworks can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted exception", e);
        }
        log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
        SqlParameter param = new SqlParameter();
        param.setName("@PartitionLeasePrefix");
        param.setValue(leasePrefix);
        SqlQuerySpec querySpec = new SqlQuerySpec(
            "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param));
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        // Rewrite every lease with a bogus owner to simulate a crashed/stale host.
        createdLeaseCollection
            .queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
            .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
            .flatMap(doc -> {
                ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                leaseDocument.setOwner("TEMP_OWNER");
                CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                    .map(CosmosItemResponse::getItem);
            })
            .map(leaseDocument -> {
                log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner());
                return leaseDocument;
            })
            .blockLast();
        changeFeedProcessorSecond
            .start()
            .subscribeOn(Schedulers.boundedElastic())
            .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
            .subscribe();
        Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        List<InternalObjectNode> docDefList = new ArrayList<>();
        for (int i = 0; i < FEED_COUNT; i++) {
            docDefList.add(getDocumentDefinition());
        }
        bulkInsert(createdFeedCollection, docDefList, FEED_COUNT).blockLast();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        // Wait for ownership to transfer from the first processor to the second.
        long remainingWork = 10 * CHANGE_FEED_PROCESSOR_TIMEOUT;
        while (remainingWork > 0 && changeFeedProcessorFirst.isStarted() && !changeFeedProcessorSecond.isStarted()) {
            remainingWork -= 100;
            Thread.sleep(100);
        }
        assertThat(changeFeedProcessorSecond.isStarted()).as("Change Feed Processor instance is running").isTrue();
        waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
        changeFeedProcessorSecond.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        logger.info("DONE");
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        Thread.sleep(500);
    }
}
// Simulates leases whose Owner has been reset to null while the processor is running, and
// verifies the processor re-acquires them and continues processing new documents.
// NOTE(review): left code-identical — the reactive chain's ordering (bulk insert -> start ->
// lease rewrite -> second bulk insert) is timing-sensitive; restructuring risks changing behavior.
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void ownerNullAcquiring() throws InterruptedException {
    final String ownerFirst = "Owner_First";
    final String leasePrefix = "TEST";
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
            .hostName(ownerFirst)
            .handleAllVersionsAndDeletesChanges(docs -> {
                logger.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                for (ChangeFeedProcessorItem item : docs) {
                    // Artificial 1s-per-item delay keeps the processor busy while leases are mutated.
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ignored) {
                    }
                    processItem(item, receivedDocuments);
                }
                logger.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            // Long renew/acquire/expiration intervals so the null-owner leases are not re-acquired instantly.
            .options(new ChangeFeedProcessorOptions()
                .setLeasePrefix(leasePrefix)
                .setLeaseRenewInterval(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .setLeaseAcquireInterval(Duration.ofMillis(5 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .setLeaseExpirationInterval(Duration.ofMillis(6 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .setFeedPollDelay(Duration.ofSeconds(5))
            )
            .buildChangeFeedProcessor();
        try {
            logger.info("Start more creating documents");
            List<InternalObjectNode> docDefList = new ArrayList<>();
            for (int i = 0; i < FEED_COUNT; i++) {
                docDefList.add(getDocumentDefinition());
            }
            // Pipeline: insert FEED_COUNT docs -> start the processor -> null out the lease owners
            // -> insert FEED_COUNT more docs; the processor must recover ownership and see them all.
            bulkInsert(createdFeedCollection, docDefList, FEED_COUNT)
                .last()
                .flatMap(cosmosItemResponse -> {
                    logger.info("Start first Change feed processor");
                    return changeFeedProcessorFirst.start().subscribeOn(Schedulers.boundedElastic())
                        .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT));
                })
                .then(
                    Mono.just(changeFeedProcessorFirst)
                        .flatMap( value -> {
                            logger.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
                            try {
                                Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
                            } catch (InterruptedException ignored) {
                            }
                            logger.info("QueryItems before Change feed processor processing");
                            SqlParameter param1 = new SqlParameter();
                            param1.setName("@PartitionLeasePrefix");
                            param1.setValue(leasePrefix);
                            SqlParameter param2 = new SqlParameter();
                            param2.setName("@Owner");
                            param2.setValue(ownerFirst);
                            SqlQuerySpec querySpec = new SqlQuerySpec(
                                "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix) AND c.Owner=@Owner", Arrays.asList(param1, param2));
                            CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
                            // Reset the Owner field to null on every lease currently held by ownerFirst.
                            return createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
                                .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.getResults()))
                                .flatMap(doc -> {
                                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                                    leaseDocument.setOwner(null);
                                    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                                    return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                                        .map(CosmosItemResponse::getItem);
                                })
                                .map(leaseDocument -> {
                                    logger.info("QueryItems after Change feed processor processing; current Owner is'{}'", leaseDocument.getOwner());
                                    return leaseDocument;
                                })
                                .last()
                                .flatMap(leaseDocument -> {
                                    logger.info("Start creating more documents");
                                    List<InternalObjectNode> docDefList1 = new ArrayList<>();
                                    for (int i = 0; i < FEED_COUNT; i++) {
                                        docDefList1.add(getDocumentDefinition());
                                    }
                                    return bulkInsert(createdFeedCollection, docDefList1, FEED_COUNT)
                                        .last();
                                });
                        }))
                .subscribe();
        } catch (Exception ex) {
            log.error("First change feed processor did not start in the expected time", ex);
            throw ex;
        }
        // Poll until the processor reports started (or the budget runs out), then wait for docs.
        long remainingWork = 20 * CHANGE_FEED_PROCESSOR_TIMEOUT;
        while (remainingWork > 0 && !changeFeedProcessorFirst.isStarted()) {
            remainingWork -= 100;
            Thread.sleep(100);
        }
        waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
        assertThat(changeFeedProcessorFirst.isStarted()).as("Change Feed Processor instance is running").isTrue();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        changeFeedProcessorFirst.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        Thread.sleep(500);
    }
}
// Verifies the processor recovers leases whose Owner was overwritten with random (inactive) host
// names: with aggressive renew/acquire/expiration intervals the running processor must steal the
// leases back and keep processing a second batch of documents.
@Test(groups = { "emulator" }, timeOut = 20 * TIMEOUT, enabled = false)
public void inactiveOwnersRecovery() throws InterruptedException {
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        String leasePrefix = "TEST";
        ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges(fullFidelityChangeFeedProcessorHandler(receivedDocuments))
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            // 1s renew/acquire with 5s expiration so foreign owners are detected as inactive quickly.
            .options(new ChangeFeedProcessorOptions()
                .setLeaseRenewInterval(Duration.ofSeconds(1))
                .setLeaseAcquireInterval(Duration.ofSeconds(1))
                .setLeaseExpirationInterval(Duration.ofSeconds(5))
                .setFeedPollDelay(Duration.ofSeconds(1))
                .setLeasePrefix(leasePrefix)
                .setMaxItemCount(100)
                .setMaxScaleCount(0)
                .setScheduler(Schedulers.newParallel("CFP parallel",
                    10 * Schedulers.DEFAULT_POOL_SIZE,
                    true))
            )
            .buildChangeFeedProcessor();
        try {
            changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (Exception ex) {
            log.error("Change feed processor did not start in the expected time", ex);
            throw ex;
        }
        // First batch must be processed while ownership is still intact.
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments,2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
        log.info("Update leases with random owners");
        SqlParameter param1 = new SqlParameter();
        param1.setName("@PartitionLeasePrefix");
        param1.setValue(leasePrefix);
        SqlQuerySpec querySpec = new SqlQuerySpec(
            "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Arrays.asList(param1));
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        // Overwrite every lease's Owner with a random host name that no live processor uses.
        createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
            .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
            .flatMap(doc -> {
                ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                leaseDocument.setOwner(RandomStringUtils.randomAlphabetic(10));
                CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                    .map(CosmosItemResponse::getItem);
            })
            .flatMap(leaseDocument -> createdLeaseCollection.readItem(leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), InternalObjectNode.class))
            .map(doc -> {
                ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc.getItem());
                log.info("Change feed processor current Owner is'{}'", leaseDocument.getOwner());
                return leaseDocument;
            })
            .blockLast();
        // Second batch: processor must reclaim the leases and process everything again.
        createdDocuments.clear();
        receivedDocuments.clear();
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        Thread.sleep(500);
    }
}
// Verifies that an aggressive client-level end-to-end operation timeout (1ms) is suppressed for
// change feed processor internals: despite the policy, the processor must start, consume
// FEED_COUNT documents, and stop cleanly.
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void endToEndTimeoutConfigShouldBeSuppressed() throws InterruptedException {
    CosmosAsyncClient clientWithE2ETimeoutConfig = null;
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        // A 1ms end-to-end latency policy would fail virtually every operation if it applied here.
        clientWithE2ETimeoutConfig = this.getClientBuilder()
            .endToEndOperationLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofMillis(1)).build())
            .contentResponseOnWriteEnabled(true)
            .buildAsyncClient();
        // Re-resolve the same containers through the timeout-configured client.
        CosmosAsyncDatabase testDatabase = clientWithE2ETimeoutConfig.getDatabase(this.createdDatabase.getId());
        CosmosAsyncContainer createdFeedCollectionDuplicate = testDatabase.getContainer(createdFeedCollection.getId());
        CosmosAsyncContainer createdLeaseCollectionDuplicate = testDatabase.getContainer(createdLeaseCollection.getId());
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
        ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
            .options(changeFeedProcessorOptions)
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {}", Thread.currentThread().getId());
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollectionDuplicate)
            .leaseContainer(createdLeaseCollectionDuplicate)
            .buildChangeFeedProcessor();
        try {
            changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
            logger.info("Starting ChangeFeed processor");
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Finished starting ChangeFeed processor");
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            logger.info("Set up read feed documents");
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Validating changes now");
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        // safeClose tolerates null, so this is safe even if client construction above threw.
        safeClose(clientWithE2ETimeoutConfig);
        Thread.sleep(500);
    }
}
@Test(groups = { "cfp-split" }, dataProvider = "contextTestConfigs", timeOut = 160 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
/**
 * Builds a change feed handler that records every received item into {@code receivedDocuments}
 * (keyed by item id via {@code processItem}) and logs the start/end of each batch.
 */
private Consumer<List<ChangeFeedProcessorItem>> changeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
    return changes -> {
        long threadId = Thread.currentThread().getId();
        logger.info("START processing from thread in test {}", threadId);
        changes.forEach(change -> processItem(change, receivedDocuments));
        logger.info("END processing from thread {}", threadId);
    };
}
private BiConsumer<List<ChangeFeedProcessorItem>, ChangeFeedProcessorContext> changeFeedProcessorHandlerWithContext(
Map<String, ChangeFeedProcessorItem> receivedDocuments, Set<String> receivedLeaseTokensFromContext) {
return (docs, context) -> {
logger.info("START processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
validateChangeFeedProcessorContext(context);
processChangeFeedProcessorContext(context, receivedLeaseTokensFromContext);
logger.info("END processing from thread {}", Thread.currentThread().getId());
};
}
/**
 * Asserts the processor is running, owns all its leases under {@code hostName}, and that every
 * created document was observed (receivedDocuments holds exactly FEED_COUNT entries keyed by id).
 *
 * FIX: AssertJ's as()/describedAs() must be applied BEFORE the verification method; the
 * original chained it after isNotNull()/isEqualTo(), which silently discards the description.
 *
 * NOTE(review): the {@code sleepTime} parameter is unused here; kept for signature
 * compatibility with existing callers.
 */
void validateChangeFeedProcessing(ChangeFeedProcessor changeFeedProcessor, List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, int sleepTime) throws InterruptedException {
    assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
    // Log the full state (pretty-printed) while passing it through unchanged.
    List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessor
        .getCurrentState()
        .map(state -> {
            try {
                log.info(OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(state));
            } catch (JsonProcessingException ex) {
                log.error("Unexpected", ex);
            }
            return state;
        })
        .block();
    assertThat(cfpCurrentState).as("Change Feed Processor current state").isNotNull();
    for (ChangeFeedProcessorState item : cfpCurrentState) {
        assertThat(item.getHostName()).as("Change Feed Processor ownership").isEqualTo(hostName);
    }
    assertThat(receivedDocuments.size()).isEqualTo(FEED_COUNT);
    for (InternalObjectNode item : createdDocuments) {
        assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
    }
}
/** Asserts that the batch context carries a non-null lease token. */
void validateChangeFeedProcessorContext(ChangeFeedProcessorContext changeFeedProcessorContext) {
    assertThat(changeFeedProcessorContext.getLeaseToken()).isNotNull();
}
private Consumer<List<ChangeFeedProcessorItem>> fullFidelityChangeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
return docs -> {
log.info("START processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
log.info("END processing from thread {}", Thread.currentThread().getId());
};
}
private void waitToReceiveDocuments(Map<String, ChangeFeedProcessorItem> receivedDocuments, long timeoutInMillisecond, long count) throws InterruptedException {
long remainingWork = timeoutInMillisecond;
while (remainingWork > 0 && receivedDocuments.size() < count) {
remainingWork -= 100;
Thread.sleep(200);
}
assertThat(remainingWork > 0).as("Failed to receive all the feed documents").isTrue();
}
@BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT)
public void before_ChangeFeedProcessorTest() {
client = getClientBuilder().buildAsyncClient();
createdDatabase = getSharedCosmosDatabase(client);
}
@AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
private void setupReadFeedDocuments(List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, CosmosAsyncContainer feedCollection, long count) {
List<InternalObjectNode> docDefList = new ArrayList<>();
for(int i = 0; i < count; i++) {
InternalObjectNode item = getDocumentDefinition();
docDefList.add(item);
logger.info("Adding the following item to bulk list: {}", item);
}
createdDocuments.addAll(bulkInsertBlocking(feedCollection, docDefList));
waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
private void createReadFeedDocuments(List<InternalObjectNode> createdDocuments, CosmosAsyncContainer feedCollection, long count) {
List<InternalObjectNode> docDefList = new ArrayList<>();
for(int i = 0; i < count; i++) {
docDefList.add(getDocumentDefinition());
}
createdDocuments.addAll(bulkInsertBlocking(feedCollection, docDefList));
waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
private InternalObjectNode getDocumentDefinition() {
String uuid = UUID.randomUUID().toString();
InternalObjectNode doc = new InternalObjectNode(String.format("{ "
+ "\"id\": \"%s\", "
+ "\"mypk\": \"%s\", "
+ "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
+ "}"
, uuid, uuid));
return doc;
}
private CosmosAsyncContainer createFeedCollection(int provisionedThroughput) {
CosmosContainerRequestOptions optionsFeedCollection = new CosmosContainerRequestOptions();
return createCollection(createdDatabase, getCollectionDefinitionWithFullFidelity(), optionsFeedCollection, provisionedThroughput);
}
private CosmosAsyncContainer createLeaseCollection(int provisionedThroughput) {
CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(
"leases_" + UUID.randomUUID(),
"/id");
return createCollection(createdDatabase, collectionDefinition, options, provisionedThroughput);
}
private CosmosAsyncContainer createLeaseMonitorCollection(int provisionedThroughput) {
CosmosContainerRequestOptions options = new CosmosContainerRequestOptions();
CosmosContainerProperties collectionDefinition = new CosmosContainerProperties(
"monitor_" + UUID.randomUUID(),
"/id");
return createCollection(createdDatabase, collectionDefinition, options, provisionedThroughput);
}
private Consumer<List<ChangeFeedProcessorItem>> leasesChangeFeedProcessorHandler(LeaseStateMonitor leaseStateMonitor) {
return docs -> {
log.info("LEASES processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
try {
log
.debug("LEASE RECEIVED {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
} catch (JsonProcessingException e) {
log.error("Failure in processing json [{}]", e.getMessage(), e);
}
JsonNode leaseToken = item.getCurrent().get("LeaseToken");
if (leaseToken != null) {
JsonNode continuationTokenNode = item.getCurrent().get("ContinuationToken");
if (continuationTokenNode == null) {
log.error("Found invalid lease document");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
log.info("LEASE {} with continuation {}", leaseToken.asText(), continuationTokenNode.asText());
if (leaseStateMonitor.isAfterLeaseInitialization) {
String value = continuationTokenNode.asText().replaceAll("[^0-9]", "");
if (value.isEmpty()) {
log.error("Found unexpected continuation token that does not conform to the expected format");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
long continuationToken = Long.parseLong(value);
if (leaseStateMonitor.parentContinuationToken > continuationToken) {
log.error("Found unexpected continuation token that did not advance after the split; parent: {}, current: {}");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
}
}
leaseStateMonitor.receivedLeases.put(item.getCurrent().get("id").asText(), item.getCurrent());
}
}
log.info("LEASES processing from thread {}", Thread.currentThread().getId());
};
}
private static synchronized void processItem(ChangeFeedProcessorItem item, Map<String, ChangeFeedProcessorItem> receivedDocuments) {
log.info("RECEIVED {}", item);
receivedDocuments.put(item.getCurrent().get("id").asText(), item);
}
private static synchronized void processChangeFeedProcessorContext(
ChangeFeedProcessorContext context,
Set<String> receivedLeaseTokens) {
if (context == null) {
fail("The context cannot be null.");
}
if (context.getLeaseToken() == null || context.getLeaseToken().isEmpty()) {
fail("The lease token cannot be null or empty.");
}
receivedLeaseTokens.add(context.getLeaseToken());
}
class LeaseStateMonitor {
public Map<String, JsonNode> receivedLeases = new ConcurrentHashMap<>();
public volatile boolean isAfterLeaseInitialization = false;
public volatile boolean isAfterSplits = false;
public volatile long parentContinuationToken = 0;
public volatile boolean isContinuationTokenAdvancing = true;
}
} |
Fixed in next iteration. | public void readFeedDocumentsAfterSplit(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollectionForSplit = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(2 * LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseMonitorCollection = createLeaseMonitorCollection(LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncClient clientWithStaleCache = null;
try {
clientWithStaleCache = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
CosmosAsyncDatabase databaseFromStaleClient =
clientWithStaleCache.getDatabase(createdFeedCollectionForSplit.getDatabase().getId());
CosmosAsyncContainer feedCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdFeedCollectionForSplit.getId());
CosmosAsyncContainer leaseCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdLeaseCollection.getId());
ChangeFeedProcessor staleChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(feedCollectionFromStaleClient)
.leaseContainer(leaseCollectionFromStaleClient)
.handleAllVersionsAndDeletesChanges(changeFeedProcessorItems -> {})
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false))
.buildChangeFeedProcessor();
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
Set<String> queriedLeaseTokensFromLeaseCollection = ConcurrentHashMap.newKeySet();
LeaseStateMonitor leaseStateMonitor = new LeaseStateMonitor();
ChangeFeedProcessor leaseMonitoringChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleLatestVersionChanges(leasesChangeFeedProcessorHandler(leaseStateMonitor))
.feedContainer(createdLeaseCollection)
.leaseContainer(createdLeaseMonitorCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("MONITOR")
.setStartFromBeginning(true)
.setMaxItemCount(10)
.setLeaseRenewInterval(Duration.ofSeconds(2))
).buildChangeFeedProcessor();
ChangeFeedProcessorBuilder changeFeedProcessorBuilderForFeedMonitoring = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(createdFeedCollectionForSplit)
.leaseContainer(createdLeaseCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false));
if (isContextRequired) {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilderForFeedMonitoring.buildChangeFeedProcessor();
leaseMonitoringChangeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(200 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor for lease monitoring did not start in the expected time", throwable);
return Mono.error(throwable);
})
.then(
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor did not start in the expected time", throwable);
return Mono.error(throwable);
}))
.subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollectionForSplit, FEED_COUNT);
staleChangeFeedProcessor.getCurrentState().block();
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection
.addAll(leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
createdFeedCollectionForSplit
.readThroughput().subscribeOn(Schedulers.boundedElastic())
.flatMap(currentThroughput ->
createdFeedCollectionForSplit
.replaceThroughput(ThroughputProperties.createManualThroughput(FEED_COLLECTION_THROUGHPUT_FOR_SPLIT))
.subscribeOn(Schedulers.boundedElastic())
).subscribe();
long continuationToken = Long.MAX_VALUE;
for (JsonNode item : leaseStateMonitor.receivedLeases.values()) {
JsonNode tempToken = item.get("ContinuationToken");
long continuationTokenValue = 0;
if (tempToken != null && StringUtils.isNotEmpty(tempToken.asText())) {
ChangeFeedState changeFeedState = ChangeFeedState.fromString(tempToken.asText());
continuationTokenValue =
Long.parseLong(changeFeedState.getContinuation().getCurrentContinuationToken().getToken().replace("\"", ""));
}
if (tempToken == null || continuationTokenValue == 0) {
logger.error("Found unexpected lease with continuation token value of null or 0");
try {
logger.info("ERROR LEASE FOUND {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
} catch (JsonProcessingException e) {
logger.error("Failure in processing json [{}]", e.getMessage(), e);
}
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
if (continuationToken > continuationTokenValue) {
continuationToken = continuationTokenValue;
}
}
}
if (continuationToken == Long.MAX_VALUE) {
logger.error("Could not find any valid lease documents");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
leaseStateMonitor.parentContinuationToken = continuationToken;
}
leaseStateMonitor.isAfterLeaseInitialization = true;
String partitionKeyRangesPath = extractContainerSelfLink(createdFeedCollectionForSplit);
AsyncDocumentClient contextClient = getContextClient(createdDatabase);
Flux.just(1).subscribeOn(Schedulers.boundedElastic())
.flatMap(value -> {
logger.warn("Reading current throughput change.");
return contextClient.readPartitionKeyRanges(partitionKeyRangesPath, (CosmosQueryRequestOptions) null);
})
.map(partitionKeyRangeFeedResponse -> {
int count = partitionKeyRangeFeedResponse.getResults().size();
if (count < 2) {
logger.warn("Throughput change is pending.");
throw new RuntimeException("Throughput change is not done.");
}
return count;
})
.retryWhen(Retry.max(40).filter(throwable -> {
try {
logger.warn("Retrying...");
Thread.sleep(10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted exception", e);
}
return true;
}))
.last()
.doOnSuccess(partitionCount -> {
leaseStateMonitor.isAfterSplits = true;
})
.block();
assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
createReadFeedDocuments(createdDocuments, createdFeedCollectionForSplit, FEED_COUNT);
waitToReceiveDocuments(receivedDocuments, 2 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT * 2);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
leaseMonitoringChangeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
int leaseCount = changeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCount > 1).as("Found %d leases", leaseCount).isTrue();
int leaseCountFromStaleCfp = staleChangeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCountFromStaleCfp).isEqualTo(leaseCount);
assertThat(receivedDocuments.size()).isEqualTo(createdDocuments.size());
for (InternalObjectNode item : createdDocuments) {
assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
}
assertThat(leaseStateMonitor.isContinuationTokenAdvancing && leaseStateMonitor.parentContinuationToken > 0)
.as("Continuation tokens for the leases after split should advance from parent value; parent: %d", leaseStateMonitor.parentContinuationToken).isTrue();
leaseDocuments = createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection.addAll(
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
if (isContextRequired) {
assertThat(receivedLeaseTokensFromContext.size())
.isEqualTo(queriedLeaseTokensFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(queriedLeaseTokensFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} finally {
System.out.println("Start to delete FeedCollectionForSplit");
safeDeleteCollection(createdFeedCollectionForSplit);
safeDeleteCollection(createdLeaseCollection);
if (clientWithStaleCache != null) {
safeClose(clientWithStaleCache);
}
Thread.sleep(500);
}
} | safeClose(clientWithStaleCache); | public void readFeedDocumentsAfterSplit(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollectionForSplit = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(2 * LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseMonitorCollection = createLeaseMonitorCollection(LEASE_COLLECTION_THROUGHPUT);
CosmosAsyncClient clientWithStaleCache = null;
try {
clientWithStaleCache = new CosmosClientBuilder()
.endpoint(TestConfigurations.HOST)
.key(TestConfigurations.MASTER_KEY)
.contentResponseOnWriteEnabled(true)
.buildAsyncClient();
CosmosAsyncDatabase databaseFromStaleClient =
clientWithStaleCache.getDatabase(createdFeedCollectionForSplit.getDatabase().getId());
CosmosAsyncContainer feedCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdFeedCollectionForSplit.getId());
CosmosAsyncContainer leaseCollectionFromStaleClient =
databaseFromStaleClient.getContainer(createdLeaseCollection.getId());
ChangeFeedProcessor staleChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(feedCollectionFromStaleClient)
.leaseContainer(leaseCollectionFromStaleClient)
.handleAllVersionsAndDeletesChanges(changeFeedProcessorItems -> {})
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false))
.buildChangeFeedProcessor();
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
Set<String> queriedLeaseTokensFromLeaseCollection = ConcurrentHashMap.newKeySet();
LeaseStateMonitor leaseStateMonitor = new LeaseStateMonitor();
ChangeFeedProcessor leaseMonitoringChangeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleLatestVersionChanges(leasesChangeFeedProcessorHandler(leaseStateMonitor))
.feedContainer(createdLeaseCollection)
.leaseContainer(createdLeaseMonitorCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("MONITOR")
.setStartFromBeginning(true)
.setMaxItemCount(10)
.setLeaseRenewInterval(Duration.ofSeconds(2))
).buildChangeFeedProcessor();
ChangeFeedProcessorBuilder changeFeedProcessorBuilderForFeedMonitoring = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.feedContainer(createdFeedCollectionForSplit)
.leaseContainer(createdLeaseCollection)
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix("TEST")
.setStartFromBeginning(false));
if (isContextRequired) {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilderForFeedMonitoring = changeFeedProcessorBuilderForFeedMonitoring
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilderForFeedMonitoring.buildChangeFeedProcessor();
leaseMonitoringChangeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(200 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor for lease monitoring did not start in the expected time", throwable);
return Mono.error(throwable);
})
.then(
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.onErrorResume(throwable -> {
logger.error("Change feed processor did not start in the expected time", throwable);
return Mono.error(throwable);
}))
.subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollectionForSplit, FEED_COUNT);
staleChangeFeedProcessor.getCurrentState().block();
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection
.addAll(leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
createdFeedCollectionForSplit
.readThroughput().subscribeOn(Schedulers.boundedElastic())
.flatMap(currentThroughput ->
createdFeedCollectionForSplit
.replaceThroughput(ThroughputProperties.createManualThroughput(FEED_COLLECTION_THROUGHPUT_FOR_SPLIT))
.subscribeOn(Schedulers.boundedElastic())
).subscribe();
long continuationToken = Long.MAX_VALUE;
for (JsonNode item : leaseStateMonitor.receivedLeases.values()) {
JsonNode tempToken = item.get("ContinuationToken");
long continuationTokenValue = 0;
if (tempToken != null && StringUtils.isNotEmpty(tempToken.asText())) {
ChangeFeedState changeFeedState = ChangeFeedState.fromString(tempToken.asText());
continuationTokenValue =
Long.parseLong(changeFeedState.getContinuation().getCurrentContinuationToken().getToken().replace("\"", ""));
}
if (tempToken == null || continuationTokenValue == 0) {
logger.error("Found unexpected lease with continuation token value of null or 0");
try {
logger.info("ERROR LEASE FOUND {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
} catch (JsonProcessingException e) {
logger.error("Failure in processing json [{}]", e.getMessage(), e);
}
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
if (continuationToken > continuationTokenValue) {
continuationToken = continuationTokenValue;
}
}
}
if (continuationToken == Long.MAX_VALUE) {
logger.error("Could not find any valid lease documents");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
leaseStateMonitor.parentContinuationToken = continuationToken;
}
leaseStateMonitor.isAfterLeaseInitialization = true;
String partitionKeyRangesPath = extractContainerSelfLink(createdFeedCollectionForSplit);
AsyncDocumentClient contextClient = getContextClient(createdDatabase);
Flux.just(1).subscribeOn(Schedulers.boundedElastic())
.flatMap(value -> {
logger.warn("Reading current throughput change.");
return contextClient.readPartitionKeyRanges(partitionKeyRangesPath, (CosmosQueryRequestOptions) null);
})
.map(partitionKeyRangeFeedResponse -> {
int count = partitionKeyRangeFeedResponse.getResults().size();
if (count < 2) {
logger.warn("Throughput change is pending.");
throw new RuntimeException("Throughput change is not done.");
}
return count;
})
.retryWhen(Retry.max(40).filter(throwable -> {
try {
logger.warn("Retrying...");
Thread.sleep(10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted exception", e);
}
return true;
}))
.last()
.doOnSuccess(partitionCount -> {
leaseStateMonitor.isAfterSplits = true;
})
.block();
assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
createReadFeedDocuments(createdDocuments, createdFeedCollectionForSplit, FEED_COUNT);
waitToReceiveDocuments(receivedDocuments, 2 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT * 2);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
leaseMonitoringChangeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
int leaseCount = changeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCount > 1).as("Found %d leases", leaseCount).isTrue();
int leaseCountFromStaleCfp = staleChangeFeedProcessor.getCurrentState().map(List::size).block();
assertThat(leaseCountFromStaleCfp).isEqualTo(leaseCount);
assertThat(receivedDocuments.size()).isEqualTo(createdDocuments.size());
for (InternalObjectNode item : createdDocuments) {
assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
}
assertThat(leaseStateMonitor.isContinuationTokenAdvancing && leaseStateMonitor.parentContinuationToken > 0)
.as("Continuation tokens for the leases after split should advance from parent value; parent: %d", leaseStateMonitor.parentContinuationToken).isTrue();
leaseDocuments = createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
queriedLeaseTokensFromLeaseCollection.addAll(
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList()));
if (isContextRequired) {
assertThat(receivedLeaseTokensFromContext.size())
.isEqualTo(queriedLeaseTokensFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(queriedLeaseTokensFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} finally {
System.out.println("Start to delete FeedCollectionForSplit");
safeDeleteCollection(createdFeedCollectionForSplit);
safeDeleteCollection(createdLeaseCollection);
safeClose(clientWithStaleCache);
Thread.sleep(500);
}
} | class FullFidelityChangeFeedProcessorTest extends TestSuiteBase {
private final static Logger log = LoggerFactory.getLogger(FullFidelityChangeFeedProcessorTest.class);
private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper();
private CosmosAsyncDatabase createdDatabase;
private final String hostName = RandomStringUtils.randomAlphabetic(6);
private final int FEED_COUNT = 10;
private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000;
private final int FEED_COLLECTION_THROUGHPUT = 400;
private final int FEED_COLLECTION_THROUGHPUT_FOR_SPLIT = 10100;
private final int LEASE_COLLECTION_THROUGHPUT = 400;
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuilders")
public FullFidelityChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
@DataProvider
public Object[] contextTestConfigs() {
return new Object[] {true, false};
}
@Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void fullFidelityChangeFeedProcessorStartFromNow(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
.options(changeFeedProcessorOptions)
.hostName(hostName)
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection);
if (isContextRequired) {
changeFeedProcessorBuilder = changeFeedProcessorBuilder
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilder = changeFeedProcessorBuilder
.handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
try {
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
logger.info("Starting ChangeFeed processor");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Finished starting ChangeFeed processor");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
logger.info("Set up read feed documents");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Validating changes now");
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
List<String> leaseTokensCollectedFromLeaseCollection =
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
if (isContextRequired) {
assertThat(leaseTokensCollectedFromLeaseCollection).isNotNull();
assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void fullFidelityChangeFeedProcessorStartFromContinuationToken(boolean isContextRequired) throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
.options(changeFeedProcessorOptions)
.hostName(hostName)
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection);
if (isContextRequired) {
changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
} else {
changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
changeFeedProcessorHandler(receivedDocuments));
}
ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
try {
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
logger.info("Starting ChangeFeed processor");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Finished starting ChangeFeed processor");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
logger.info("Set up read feed documents");
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
logger.info("Validating changes now");
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
String leaseQuery = "select * from c where not contains(c.id, \"info\")";
List<JsonNode> leaseDocuments =
createdLeaseCollection
.queryItems(leaseQuery, JsonNode.class)
.byPage()
.blockFirst()
.getResults();
List<String> leaseTokensCollectedFromLeaseCollection =
leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
if (isContextRequired) {
assertThat(leaseTokensCollectedFromLeaseCollection).isNotNull();
assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
}
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentState() throws InterruptedException {
    // Verifies ChangeFeedProcessor.getCurrentState(): estimated lag is 0 right after the main
    // processor started and stopped with an empty feed, and grows to FEED_COUNT once documents
    // are inserted while the processor is stopped. A never-started "side-cart" processor reads
    // the same lease container and must report the same lag without its handler ever firing.
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {}", Thread.currentThread().getId());
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        // This instance is intentionally never started; its handler firing would be a bug.
        ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
            .hostName("side-cart")
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                fail("ERROR - we should not execute this handler");
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        try {
            // Start main, then schedule an automatic stop after 2 timeout periods so the
            // leases exist but nothing keeps draining the feed afterwards.
            changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .then(Mono.just(changeFeedProcessorMain)
                    .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .flatMap(value -> changeFeedProcessorMain.stop()
                        .subscribeOn(Schedulers.boundedElastic())
                        .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    ))
                .subscribe();
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
        // Wait for the start + delayed stop sequence above to complete.
        Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        // FIX: AssertJ's as(...) must be called BEFORE the assertion method; when chained
        // after it (e.g. isNotZero().as(...)) the description is silently ignored on failure.
        assertThat(cfpCurrentState.size()).as("Change Feed Processor number of leases should not be 0.").isNotZero();
        int totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag at start").isEqualTo(0);
        List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag at start").isEqualTo(0);
        // Insert documents while the processor is stopped so they accumulate as lag.
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag").isEqualTo(FEED_COUNT);
        cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag").isEqualTo(FEED_COUNT);
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        // Allow background lease-release work to wind down before the next test.
        Thread.sleep(500);
    }
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentStateWithInsertedDocuments() throws InterruptedException {
    // Verifies that estimated lag reported by getCurrentState() stays at 0 while the main
    // processor is RUNNING and draining inserted documents, and rises to FEED_COUNT after the
    // processor is stopped and more documents are inserted. The never-started "side-cart"
    // processor reads the same leases and must report identical lag without handling anything.
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {}", Thread.currentThread().getId());
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        // This instance is intentionally never started; its handler firing would be a bug.
        ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
            .hostName("side-cart")
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                fail("ERROR - we should not execute this handler");
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .buildChangeFeedProcessor();
        try {
            changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
        // Give the processor time to start and acquire its leases.
        Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        // FIX: AssertJ's as(...) must be called BEFORE the assertion method; when chained
        // after it (e.g. isNotZero().as(...)) the description is silently ignored on failure.
        assertThat(cfpCurrentState.size()).as("Change Feed Processor number of leases should not be 0.").isNotZero();
        int totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag at start").isEqualTo(0);
        List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info(OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag at start").isEqualTo(0);
        // Insert while running: the processor should drain these, keeping lag at 0.
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of main after inserting documents is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag").isEqualTo(0);
        cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of side cart after inserting documents is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag").isEqualTo(0);
        // Stop the processor, then insert more documents: these must show up as lag.
        changeFeedProcessorMain.stop().subscribe();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
        cfpCurrentState = changeFeedProcessorMain.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of main after stopping is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentState) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Main estimated total lag").isEqualTo(FEED_COUNT);
        cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
            .map(state -> {
                try {
                    log.info("Current state of side cart after stopping is : {}",
                        OBJECT_MAPPER.writeValueAsString(state));
                } catch (JsonProcessingException ex) {
                    log.error("Unexpected", ex);
                }
                return state;
            }).block();
        assertThat(cfpCurrentStateSideCart.size()).as("Change Feed Processor side cart number of leases should not be 0.").isNotZero();
        totalLag = 0;
        for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
            totalLag += item.getEstimatedLag();
        }
        assertThat(totalLag).as("Change Feed Processor Side Cart estimated total lag").isEqualTo(FEED_COUNT);
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        // Allow background lease-release work to wind down before the next test.
        Thread.sleep(500);
    }
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void staledLeaseAcquiring() throws InterruptedException {
    // Scenario: a first processor starts and stops, leaving leases behind. The test then
    // rewrites the lease owner to a bogus "TEMP_OWNER" (a stale owner that will never renew),
    // and verifies that a second processor with short lease expiration steals those leases
    // and receives all subsequently inserted documents.
    final String ownerFirst = "Owner_First";
    final String ownerSecond = "Owner_Second";
    final String leasePrefix = "TEST";
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
            .hostName(ownerFirst)
            .handleAllVersionsAndDeletesChanges(docs -> {
                log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .options(new ChangeFeedProcessorOptions()
                .setLeasePrefix(leasePrefix)
            )
            .buildChangeFeedProcessor();
        // Second processor uses aggressive lease intervals so it expires and acquires the
        // stale leases quickly; MaxScaleCount 0 means unlimited.
        ChangeFeedProcessor changeFeedProcessorSecond = new ChangeFeedProcessorBuilder()
            .hostName(ownerSecond)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
            })
            .feedContainer(createdFeedCollection)
            .leaseContainer(createdLeaseCollection)
            .options(new ChangeFeedProcessorOptions()
                .setLeaseRenewInterval(Duration.ofSeconds(10))
                .setLeaseAcquireInterval(Duration.ofSeconds(5))
                .setLeaseExpirationInterval(Duration.ofSeconds(20))
                .setFeedPollDelay(Duration.ofSeconds(2))
                .setLeasePrefix(leasePrefix)
                .setMaxItemCount(10)
                .setMaxScaleCount(0)
            )
            .buildChangeFeedProcessor();
        // Start the first processor and schedule its stop so the leases get created.
        changeFeedProcessorFirst
            .start()
            .subscribeOn(Schedulers.boundedElastic())
            .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
            .then(Mono.just(changeFeedProcessorFirst)
                .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .flatMap(value ->
                    changeFeedProcessorFirst.stop()
                        .subscribeOn(Schedulers.boundedElastic())
                        .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                ))
            .subscribe();
        try {
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag before translating the exception so callers
            // up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted exception", e);
        }
        log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
        // Rewrite every lease document's owner to a host name that will never renew.
        SqlParameter param = new SqlParameter();
        param.setName("@PartitionLeasePrefix");
        param.setValue(leasePrefix);
        SqlQuerySpec querySpec = new SqlQuerySpec(
            "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param));
        CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
        createdLeaseCollection
            .queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
            .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
            .flatMap(doc -> {
                ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                leaseDocument.setOwner("TEMP_OWNER");
                CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                    .map(CosmosItemResponse::getItem);
            })
            .map(leaseDocument -> {
                log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner());
                return leaseDocument;
            })
            .blockLast();
        changeFeedProcessorSecond
            .start()
            .subscribeOn(Schedulers.boundedElastic())
            .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
            .subscribe();
        Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        List<InternalObjectNode> docDefList = new ArrayList<>();
        for (int i = 0; i < FEED_COUNT; i++) {
            docDefList.add(getDocumentDefinition());
        }
        bulkInsert(createdFeedCollection, docDefList, FEED_COUNT).blockLast();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        // Poll until the second instance takes over (or the budget runs out).
        long remainingWork = 10 * CHANGE_FEED_PROCESSOR_TIMEOUT;
        while (remainingWork > 0 && changeFeedProcessorFirst.isStarted() && !changeFeedProcessorSecond.isStarted()) {
            remainingWork -= 100;
            Thread.sleep(100);
        }
        assertThat(changeFeedProcessorSecond.isStarted()).as("Change Feed Processor instance is running").isTrue();
        waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
        changeFeedProcessorSecond.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
        Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        logger.info("DONE");
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        // Allow background lease-release work to wind down before the next test.
        Thread.sleep(500);
    }
}
// Scenario: the first processor acquires leases, then the test clears the lease Owner field
// to null (simulating an orphaned lease) and verifies the processor re-acquires ownership
// and keeps processing newly inserted documents. The reactive chain is order-sensitive:
// insert -> start -> null out owners -> insert again.
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void ownerNullAcquiring() throws InterruptedException {
final String ownerFirst = "Owner_First";
final String leasePrefix = "TEST";
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
// The handler sleeps 1s per item to keep the processor busy while the test tampers
// with the lease documents underneath it.
ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
.hostName(ownerFirst)
.handleAllVersionsAndDeletesChanges(docs -> {
logger.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
for (ChangeFeedProcessorItem item : docs) {
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {
}
processItem(item, receivedDocuments);
}
logger.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
// Long renew/acquire/expiration intervals so the nulled owner is not immediately
// reclaimed by the normal renewal cycle.
.options(new ChangeFeedProcessorOptions()
.setLeasePrefix(leasePrefix)
.setLeaseRenewInterval(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.setLeaseAcquireInterval(Duration.ofMillis(5 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.setLeaseExpirationInterval(Duration.ofMillis(6 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.setFeedPollDelay(Duration.ofSeconds(5))
)
.buildChangeFeedProcessor();
try {
logger.info("Start more creating documents");
List<InternalObjectNode> docDefList = new ArrayList<>();
for (int i = 0; i < FEED_COUNT; i++) {
docDefList.add(getDocumentDefinition());
}
// Pipeline: bulk insert FEED_COUNT docs, then start the processor, then (after a
// pause) null out the Owner on every lease owned by ownerFirst, then insert another
// FEED_COUNT docs that must still be processed.
bulkInsert(createdFeedCollection, docDefList, FEED_COUNT)
.last()
.flatMap(cosmosItemResponse -> {
logger.info("Start first Change feed processor");
return changeFeedProcessorFirst.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT));
})
.then(
Mono.just(changeFeedProcessorFirst)
.flatMap( value -> {
logger.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
try {
// Blocking sleep inside the reactive chain; acceptable here because the test
// subscribes on a boundedElastic scheduler.
Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (InterruptedException ignored) {
}
logger.info("QueryItems before Change feed processor processing");
SqlParameter param1 = new SqlParameter();
param1.setName("@PartitionLeasePrefix");
param1.setValue(leasePrefix);
SqlParameter param2 = new SqlParameter();
param2.setName("@Owner");
param2.setValue(ownerFirst);
SqlQuerySpec querySpec = new SqlQuerySpec(
"SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix) AND c.Owner=@Owner", Arrays.asList(param1, param2));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
return createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
.flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.getResults()))
.flatMap(doc -> {
// Simulate an orphaned lease by clearing its owner.
ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
leaseDocument.setOwner(null);
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
.map(CosmosItemResponse::getItem);
})
.map(leaseDocument -> {
logger.info("QueryItems after Change feed processor processing; current Owner is'{}'", leaseDocument.getOwner());
return leaseDocument;
})
.last()
.flatMap(leaseDocument -> {
logger.info("Start creating more documents");
List<InternalObjectNode> docDefList1 = new ArrayList<>();
for (int i = 0; i < FEED_COUNT; i++) {
docDefList1.add(getDocumentDefinition());
}
return bulkInsert(createdFeedCollection, docDefList1, FEED_COUNT)
.last();
});
}))
.subscribe();
} catch (Exception ex) {
log.error("First change feed processor did not start in the expected time", ex);
throw ex;
}
// Poll until the processor reports started (or the budget runs out).
long remainingWork = 20 * CHANGE_FEED_PROCESSOR_TIMEOUT;
while (remainingWork > 0 && !changeFeedProcessorFirst.isStarted()) {
remainingWork -= 100;
Thread.sleep(100);
}
waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
assertThat(changeFeedProcessorFirst.isStarted()).as("Change Feed Processor instance is running").isTrue();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
changeFeedProcessorFirst.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
// Allow background lease-release work to wind down before the next test.
Thread.sleep(500);
}
}
// Scenario: after the processor has drained a first batch, the test rewrites every lease's
// Owner to a random (non-existent) host. With 1s renew/acquire and 5s expiration intervals
// the processor must reclaim the leases and still process a second batch of documents.
@Test(groups = { "emulator" }, timeOut = 20 * TIMEOUT, enabled = false)
public void inactiveOwnersRecovery() throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
String leasePrefix = "TEST";
ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleAllVersionsAndDeletesChanges(fullFidelityChangeFeedProcessorHandler(receivedDocuments))
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
// Aggressive intervals so stale owners expire fast; dedicated parallel scheduler
// sized 10x the default pool.
.options(new ChangeFeedProcessorOptions()
.setLeaseRenewInterval(Duration.ofSeconds(1))
.setLeaseAcquireInterval(Duration.ofSeconds(1))
.setLeaseExpirationInterval(Duration.ofSeconds(5))
.setFeedPollDelay(Duration.ofSeconds(1))
.setLeasePrefix(leasePrefix)
.setMaxItemCount(100)
.setMaxScaleCount(0)
.setScheduler(Schedulers.newParallel("CFP parallel",
10 * Schedulers.DEFAULT_POOL_SIZE,
true))
)
.buildChangeFeedProcessor();
try {
changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
} catch (Exception ex) {
log.error("Change feed processor did not start in the expected time", ex);
throw ex;
}
// First batch: must be processed while ownership is intact.
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments,2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
log.info("Update leases with random owners");
SqlParameter param1 = new SqlParameter();
param1.setName("@PartitionLeasePrefix");
param1.setValue(leasePrefix);
SqlQuerySpec querySpec = new SqlQuerySpec(
"SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Arrays.asList(param1));
CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
// Assign every lease to a random host that will never renew it, then read the lease
// back to log the now-current owner.
createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
.flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
.flatMap(doc -> {
ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
leaseDocument.setOwner(RandomStringUtils.randomAlphabetic(10));
CosmosItemRequestOptions options = new CosmosItemRequestOptions();
return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
.map(CosmosItemResponse::getItem);
})
.flatMap(leaseDocument -> createdLeaseCollection.readItem(leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), InternalObjectNode.class))
.map(doc -> {
ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc.getItem());
log.info("Change feed processor current Owner is'{}'", leaseDocument.getOwner());
return leaseDocument;
})
.blockLast();
// Second batch: the processor must recover ownership from the inactive random owners
// and process these documents too.
createdDocuments.clear();
receivedDocuments.clear();
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
// Allow background lease-release work to wind down before the next test.
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void endToEndTimeoutConfigShouldBeSuppressed() throws InterruptedException {
    // Verifies that a client-level end-to-end operation timeout of 1ms (which would fail
    // every request if honored) is suppressed for change-feed-processor internal operations:
    // the processor built on that client must still process all FEED_COUNT documents.
    CosmosAsyncClient clientWithE2ETimeoutConfig = null;
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        clientWithE2ETimeoutConfig = this.getClientBuilder()
            .endToEndOperationLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofMillis(1)).build())
            .contentResponseOnWriteEnabled(true)
            .buildAsyncClient();
        CosmosAsyncDatabase testDatabase = clientWithE2ETimeoutConfig.getDatabase(this.createdDatabase.getId());
        // Re-resolve the containers through the timeout-configured client.
        CosmosAsyncContainer createdFeedCollectionDuplicate = testDatabase.getContainer(createdFeedCollection.getId());
        CosmosAsyncContainer createdLeaseCollectionDuplicate = testDatabase.getContainer(createdLeaseCollection.getId());
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
        ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
            .options(changeFeedProcessorOptions)
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {}", Thread.currentThread().getId());
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollectionDuplicate)
            .leaseContainer(createdLeaseCollectionDuplicate)
            .buildChangeFeedProcessor();
        try {
            changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
            logger.info("Starting ChangeFeed processor");
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Finished starting ChangeFeed processor");
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            logger.info("Set up read feed documents");
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Validating changes now");
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        // FIX: client construction can throw before the assignment completes, leaving the
        // reference null; guard so cleanup cannot NPE and mask the original test failure.
        if (clientWithE2ETimeoutConfig != null) {
            safeClose(clientWithE2ETimeoutConfig);
        }
        Thread.sleep(500);
    }
}
@Test(groups = { "cfp-split" }, dataProvider = "contextTestConfigs", timeOut = 160 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
/**
 * Builds the change handler used by the simple read-feed tests: every received change is
 * recorded in {@code receivedDocuments} keyed by document id.
 */
private Consumer<List<ChangeFeedProcessorItem>> changeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
    return changes -> {
        long threadId = Thread.currentThread().getId();
        logger.info("START processing from thread in test {}", threadId);
        changes.forEach(change -> processItem(change, receivedDocuments));
        logger.info("END processing from thread {}", threadId);
    };
}
/**
 * Builds a context-aware change handler: records each change by document id, validates the
 * supplied {@link ChangeFeedProcessorContext}, and collects the lease token it carries into
 * {@code receivedLeaseTokensFromContext}.
 */
private BiConsumer<List<ChangeFeedProcessorItem>, ChangeFeedProcessorContext> changeFeedProcessorHandlerWithContext(
    Map<String, ChangeFeedProcessorItem> receivedDocuments, Set<String> receivedLeaseTokensFromContext) {
    return (changes, context) -> {
        long threadId = Thread.currentThread().getId();
        logger.info("START processing from thread in test {}", threadId);
        changes.forEach(change -> processItem(change, receivedDocuments));
        validateChangeFeedProcessorContext(context);
        processChangeFeedProcessorContext(context, receivedLeaseTokensFromContext);
        logger.info("END processing from thread {}", threadId);
    };
}
/**
 * Asserts that the processor is running, owns all of its leases under this test's host name,
 * and that every created document has been received.
 *
 * @param changeFeedProcessor the processor under test; must be started
 * @param createdDocuments documents inserted into the feed container
 * @param receivedDocuments changes collected by the handler, keyed by document id
 * @param sleepTime unused; kept for signature compatibility with existing callers
 */
void validateChangeFeedProcessing(ChangeFeedProcessor changeFeedProcessor, List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, int sleepTime) throws InterruptedException {
    // FIX: AssertJ's as(...) must precede the assertion method; when chained after it
    // (e.g. isNotNull().as(...)) the description is silently ignored on failure.
    assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
    List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessor
        .getCurrentState()
        .map(state -> {
            try {
                log.info(OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(state));
            } catch (JsonProcessingException ex) {
                log.error("Unexpected", ex);
            }
            return state;
        })
        .block();
    assertThat(cfpCurrentState).as("Change Feed Processor current state").isNotNull();
    for (ChangeFeedProcessorState item : cfpCurrentState) {
        assertThat(item.getHostName()).as("Change Feed Processor ownership").isEqualTo(hostName);
    }
    assertThat(receivedDocuments.size()).isEqualTo(FEED_COUNT);
    for (InternalObjectNode item : createdDocuments) {
        assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
    }
}
/**
 * Asserts the minimal contract of a {@link ChangeFeedProcessorContext}: a non-null lease token.
 */
void validateChangeFeedProcessorContext(ChangeFeedProcessorContext changeFeedProcessorContext) {
    assertThat(changeFeedProcessorContext.getLeaseToken()).isNotNull();
}
/**
 * Builds the full-fidelity (all versions and deletes) change handler: each received change is
 * recorded in {@code receivedDocuments} keyed by document id.
 */
private Consumer<List<ChangeFeedProcessorItem>> fullFidelityChangeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
    return changes -> {
        long threadId = Thread.currentThread().getId();
        log.info("START processing from thread in test {}", threadId);
        changes.forEach(change -> processItem(change, receivedDocuments));
        log.info("END processing from thread {}", threadId);
    };
}
/**
 * Polls until at least {@code count} documents have been received or the timeout budget is
 * exhausted, then asserts that the expected number of documents actually arrived.
 *
 * @param receivedDocuments changes collected so far, keyed by document id
 * @param timeoutInMillisecond maximum time to wait
 * @param count number of documents expected
 */
private void waitToReceiveDocuments(Map<String, ChangeFeedProcessorItem> receivedDocuments, long timeoutInMillisecond, long count) throws InterruptedException {
    long remainingWork = timeoutInMillisecond;
    while (remainingWork > 0 && receivedDocuments.size() < count) {
        // FIX: decrement must match the actual sleep; the old code slept 200ms but only
        // subtracted 100, silently doubling the effective timeout.
        remainingWork -= 200;
        Thread.sleep(200);
    }
    // FIX: assert on the real success condition. The old `remainingWork > 0` check failed
    // spuriously when the final batch arrived exactly as the budget ran out.
    assertThat(receivedDocuments.size() >= count).as("Failed to receive all the feed documents").isTrue();
}
/**
 * Per-class setup: builds the shared async client and resolves the shared database once.
 */
@BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT)
public void before_ChangeFeedProcessorTest() {
    this.client = getClientBuilder().buildAsyncClient();
    this.createdDatabase = getSharedCosmosDatabase(this.client);
}
// Per-class teardown: releases the async client built in the @BeforeClass setup.
// alwaysRun = true so cleanup happens even when earlier configuration methods failed.
@AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
/**
 * Inserts {@code count} freshly generated documents into the feed container (blocking),
 * appends them to {@code createdDocuments}, and waits for replicas to catch up.
 * {@code receivedDocuments} is unused here; it is accepted for caller symmetry.
 */
private void setupReadFeedDocuments(List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, CosmosAsyncContainer feedCollection, long count) {
    final List<InternalObjectNode> documentsToInsert = new ArrayList<>();
    for (long remaining = count; remaining > 0; remaining--) {
        InternalObjectNode documentDefinition = getDocumentDefinition();
        documentsToInsert.add(documentDefinition);
        logger.info("Adding the following item to bulk list: {}", documentDefinition);
    }
    createdDocuments.addAll(bulkInsertBlocking(feedCollection, documentsToInsert));
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
/**
 * Inserts {@code count} freshly generated documents into the feed container (blocking),
 * appends them to {@code createdDocuments}, and waits for replicas to catch up.
 */
private void createReadFeedDocuments(List<InternalObjectNode> createdDocuments, CosmosAsyncContainer feedCollection, long count) {
    final List<InternalObjectNode> documentsToInsert = new ArrayList<>();
    for (long remaining = count; remaining > 0; remaining--) {
        documentsToInsert.add(getDocumentDefinition());
    }
    createdDocuments.addAll(bulkInsertBlocking(feedCollection, documentsToInsert));
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
/**
 * Generates a new feed document with a random UUID used for both "id" and the partition key
 * "mypk", plus a fixed "sgmts" payload.
 */
private InternalObjectNode getDocumentDefinition() {
    String uuid = UUID.randomUUID().toString();
    String json = String.format(
        "{ \"id\": \"%s\", \"mypk\": \"%s\", \"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]}",
        uuid, uuid);
    return new InternalObjectNode(json);
}
/**
 * Creates the feed container (with full-fidelity change feed enabled) at the given throughput.
 */
private CosmosAsyncContainer createFeedCollection(int provisionedThroughput) {
    return createCollection(
        createdDatabase,
        getCollectionDefinitionWithFullFidelity(),
        new CosmosContainerRequestOptions(),
        provisionedThroughput);
}
/**
 * Creates a uniquely named lease container partitioned by "/id" at the given throughput.
 */
private CosmosAsyncContainer createLeaseCollection(int provisionedThroughput) {
    CosmosContainerProperties leaseContainerProperties = new CosmosContainerProperties(
        "leases_" + UUID.randomUUID(),
        "/id");
    return createCollection(
        createdDatabase,
        leaseContainerProperties,
        new CosmosContainerRequestOptions(),
        provisionedThroughput);
}
/**
 * Creates a uniquely named lease-monitor container partitioned by "/id" at the given throughput.
 */
private CosmosAsyncContainer createLeaseMonitorCollection(int provisionedThroughput) {
    CosmosContainerProperties monitorContainerProperties = new CosmosContainerProperties(
        "monitor_" + UUID.randomUUID(),
        "/id");
    return createCollection(
        createdDatabase,
        monitorContainerProperties,
        new CosmosContainerRequestOptions(),
        provisionedThroughput);
}
private Consumer<List<ChangeFeedProcessorItem>> leasesChangeFeedProcessorHandler(LeaseStateMonitor leaseStateMonitor) {
return docs -> {
log.info("LEASES processing from thread in test {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
try {
log
.debug("LEASE RECEIVED {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
} catch (JsonProcessingException e) {
log.error("Failure in processing json [{}]", e.getMessage(), e);
}
JsonNode leaseToken = item.getCurrent().get("LeaseToken");
if (leaseToken != null) {
JsonNode continuationTokenNode = item.getCurrent().get("ContinuationToken");
if (continuationTokenNode == null) {
log.error("Found invalid lease document");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
else {
log.info("LEASE {} with continuation {}", leaseToken.asText(), continuationTokenNode.asText());
if (leaseStateMonitor.isAfterLeaseInitialization) {
String value = continuationTokenNode.asText().replaceAll("[^0-9]", "");
if (value.isEmpty()) {
log.error("Found unexpected continuation token that does not conform to the expected format");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
long continuationToken = Long.parseLong(value);
if (leaseStateMonitor.parentContinuationToken > continuationToken) {
log.error("Found unexpected continuation token that did not advance after the split; parent: {}, current: {}");
leaseStateMonitor.isContinuationTokenAdvancing = false;
}
}
}
leaseStateMonitor.receivedLeases.put(item.getCurrent().get("id").asText(), item.getCurrent());
}
}
log.info("LEASES processing from thread {}", Thread.currentThread().getId());
};
}
    // Records a received change-feed item, keyed by its document id.
    // synchronized so concurrent handler threads log and store atomically with respect to each other.
    private static synchronized void processItem(ChangeFeedProcessorItem item, Map<String, ChangeFeedProcessorItem> receivedDocuments) {
        log.info("RECEIVED {}", item);
        receivedDocuments.put(item.getCurrent().get("id").asText(), item);
    }
private static synchronized void processChangeFeedProcessorContext(
ChangeFeedProcessorContext context,
Set<String> receivedLeaseTokens) {
if (context == null) {
fail("The context cannot be null.");
}
if (context.getLeaseToken() == null || context.getLeaseToken().isEmpty()) {
fail("The lease token cannot be null or empty.");
}
receivedLeaseTokens.add(context.getLeaseToken());
}
    // Mutable state shared between the lease-observing change feed processor and the test
    // thread. Fields are volatile / concurrent because they are written from CFP handler
    // threads and read from the test thread.
    class LeaseStateMonitor {
        // Latest lease document seen per lease id.
        public Map<String, JsonNode> receivedLeases = new ConcurrentHashMap<>();
        // Set to true by the test once the initial lease documents have been created.
        public volatile boolean isAfterLeaseInitialization = false;
        public volatile boolean isAfterSplits = false;
        // Continuation token captured before the split; post-split tokens must not be lower.
        public volatile long parentContinuationToken = 0;
        // Cleared by the handler when a lease is malformed or its token moved backwards.
        public volatile boolean isContinuationTokenAdvancing = true;
    }
} | class FullFidelityChangeFeedProcessorTest extends TestSuiteBase {
    private final static Logger log = LoggerFactory.getLogger(FullFidelityChangeFeedProcessorTest.class);
    private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper();
    // Shared database; presumably initialized by suite setup outside this chunk — TODO confirm.
    private CosmosAsyncDatabase createdDatabase;
    // Random host name so concurrent test runs do not contend for the same leases.
    private final String hostName = RandomStringUtils.randomAlphabetic(6);
    // Number of documents inserted per ingestion round.
    private final int FEED_COUNT = 10;
    // Base unit (milliseconds) for all sleeps/timeouts in this class.
    private final int CHANGE_FEED_PROCESSOR_TIMEOUT = 5000;
    private final int FEED_COLLECTION_THROUGHPUT = 400;
    // High throughput to force a partition split in split-related tests.
    private final int FEED_COLLECTION_THROUGHPUT_FOR_SPLIT = 10100;
    private final int LEASE_COLLECTION_THROUGHPUT = 400;
    // Async client; presumably created/closed by suite setup and teardown — TODO confirm.
    private CosmosAsyncClient client;
    // TestNG factory: one test-class instance per configured client builder.
    @Factory(dataProvider = "clientBuilders")
    public FullFidelityChangeFeedProcessorTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }
    // Data provider: run each annotated test once with (true) and once without (false)
    // the ChangeFeedProcessorContext-aware handler.
    @DataProvider
    public Object[] contextTestConfigs() {
        return new Object[] {true, false};
    }
    // Verifies that a full-fidelity (all versions and deletes) processor started "from now"
    // receives every document inserted after startup; when the context-aware handler is used,
    // the lease tokens observed through the handler context must match the lease documents
    // persisted in the lease container.
    @Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
    public void fullFidelityChangeFeedProcessorStartFromNow(boolean isContextRequired) throws InterruptedException {
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
            ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
            ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
                .options(changeFeedProcessorOptions)
                .hostName(hostName)
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection);
            // Select the handler flavor under test: with or without ChangeFeedProcessorContext.
            if (isContextRequired) {
                changeFeedProcessorBuilder = changeFeedProcessorBuilder
                    .handleAllVersionsAndDeletesChanges(changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
            } else {
                changeFeedProcessorBuilder = changeFeedProcessorBuilder
                    .handleAllVersionsAndDeletesChanges(changeFeedProcessorHandler(receivedDocuments));
            }
            ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
            try {
                changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                    .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .subscribe();
                logger.info("Starting ChangeFeed processor");
                // Give the processor time to acquire its leases before producing documents.
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Finished starting ChangeFeed processor");
                setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
                logger.info("Set up read feed documents");
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Validating changes now");
                validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
                // Lease documents are the ones whose id does NOT contain "info" (excludes the bookkeeping doc).
                String leaseQuery = "select * from c where not contains(c.id, \"info\")";
                List<JsonNode> leaseDocuments =
                    createdLeaseCollection
                        .queryItems(leaseQuery, JsonNode.class)
                        .byPage()
                        .blockFirst()
                        .getResults();
                List<String> leaseTokensCollectedFromLeaseCollection =
                    leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
                if (isContextRequired) {
                    // The context handler must have observed exactly the persisted lease tokens.
                    assertThat(leaseTokensCollectedFromLeaseCollection).isNotNull();
                    assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
                    assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
                }
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            } catch (Exception ex) {
                log.error("Change feed processor did not start and stopped in the expected time", ex);
                throw ex;
            }
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // Allow background lease-release operations to drain before the next test.
            Thread.sleep(500);
        }
    }
    // Same validation as fullFidelityChangeFeedProcessorStartFromNow.
    // NOTE(review): despite the name, this test never configures a continuation token on
    // ChangeFeedProcessorOptions — the body is identical to the StartFromNow test; presumably
    // the default options already exercise the continuation-token path — TODO confirm intent.
    @Test(groups = { "emulator" }, dataProvider = "contextTestConfigs", timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
    public void fullFidelityChangeFeedProcessorStartFromContinuationToken(boolean isContextRequired) throws InterruptedException {
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            Set<String> receivedLeaseTokensFromContext = ConcurrentHashMap.newKeySet();
            ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
            ChangeFeedProcessorBuilder changeFeedProcessorBuilder = new ChangeFeedProcessorBuilder()
                .options(changeFeedProcessorOptions)
                .hostName(hostName)
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection);
            // Select the handler flavor under test: with or without ChangeFeedProcessorContext.
            if (isContextRequired) {
                changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
                    changeFeedProcessorHandlerWithContext(receivedDocuments, receivedLeaseTokensFromContext));
            } else {
                changeFeedProcessorBuilder = changeFeedProcessorBuilder.handleAllVersionsAndDeletesChanges(
                    changeFeedProcessorHandler(receivedDocuments));
            }
            ChangeFeedProcessor changeFeedProcessor = changeFeedProcessorBuilder.buildChangeFeedProcessor();
            try {
                changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                    .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .subscribe();
                logger.info("Starting ChangeFeed processor");
                // Give the processor time to acquire its leases before producing documents.
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Finished starting ChangeFeed processor");
                setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
                logger.info("Set up read feed documents");
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                logger.info("Validating changes now");
                validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
                changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
                // Lease documents are the ones whose id does NOT contain "info" (excludes the bookkeeping doc).
                String leaseQuery = "select * from c where not contains(c.id, \"info\")";
                List<JsonNode> leaseDocuments =
                    createdLeaseCollection
                        .queryItems(leaseQuery, JsonNode.class)
                        .byPage()
                        .blockFirst()
                        .getResults();
                List<String> leaseTokensCollectedFromLeaseCollection =
                    leaseDocuments.stream().map(lease -> lease.get("LeaseToken").asText()).collect(Collectors.toList());
                if (isContextRequired) {
                    // The context handler must have observed exactly the persisted lease tokens.
                    assertThat(leaseTokensCollectedFromLeaseCollection).isNotNull();
                    assertThat(receivedLeaseTokensFromContext.size()).isEqualTo(leaseTokensCollectedFromLeaseCollection.size());
                    assertThat(receivedLeaseTokensFromContext.containsAll(leaseTokensCollectedFromLeaseCollection)).isTrue();
                }
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            } catch (Exception ex) {
                log.error("Change feed processor did not start and stopped in the expected time", ex);
                throw ex;
            }
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // Allow background lease-release operations to drain before the next test.
            Thread.sleep(500);
        }
    }
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentState() throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
log.info("START processing from thread {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
log.info("END processing from thread {}", Thread.currentThread().getId());
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
.hostName("side-cart")
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
fail("ERROR - we should not execute this handler");
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
try {
changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.then(Mono.just(changeFeedProcessorMain)
.delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.flatMap(value -> changeFeedProcessorMain.stop()
.subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
))
.subscribe();
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentState.size()).isNotZero().as("Change Feed Processor number of leases should not be 0.");
int totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag at start");
List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag at start");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Main estimated total lag");
cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Side Cart estimated total lag");
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
public void getCurrentStateWithInsertedDocuments() throws InterruptedException {
CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
try {
List<InternalObjectNode> createdDocuments = new ArrayList<>();
Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
ChangeFeedProcessor changeFeedProcessorMain = new ChangeFeedProcessorBuilder()
.hostName(hostName)
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
log.info("START processing from thread {}", Thread.currentThread().getId());
for (ChangeFeedProcessorItem item : docs) {
processItem(item, receivedDocuments);
}
log.info("END processing from thread {}", Thread.currentThread().getId());
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
ChangeFeedProcessor changeFeedProcessorSideCart = new ChangeFeedProcessorBuilder()
.hostName("side-cart")
.handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
fail("ERROR - we should not execute this handler");
})
.feedContainer(createdFeedCollection)
.leaseContainer(createdLeaseCollection)
.buildChangeFeedProcessor();
try {
changeFeedProcessorMain.start().subscribeOn(Schedulers.boundedElastic())
.timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
.subscribe();
} catch (Exception ex) {
log.error("Change feed processor did not start and stopped in the expected time", ex);
throw ex;
}
Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentState.size()).isNotZero().as("Change Feed Processor number of leases should not be 0.");
int totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag at start");
List<ChangeFeedProcessorState> cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info(OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag at start");
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info("Current state of main after inserting documents is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Main estimated total lag");
cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info("Current state of side cart after inserting documents is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(0).as("Change Feed Processor Side Cart estimated total lag");
changeFeedProcessorMain.stop().subscribe();
Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
cfpCurrentState = changeFeedProcessorMain.getCurrentState()
.map(state -> {
try {
log.info("Current state of main after stopping is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentState) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Main estimated total lag");
cfpCurrentStateSideCart = changeFeedProcessorSideCart.getCurrentState()
.map(state -> {
try {
log.info("Current state of side cart after stopping is : {}",
OBJECT_MAPPER.writeValueAsString(state));
} catch (JsonProcessingException ex) {
log.error("Unexpected", ex);
}
return state;
}).block();
assertThat(cfpCurrentStateSideCart.size()).isNotZero().as("Change Feed Processor side cart number of leases should not be 0.");
totalLag = 0;
for (ChangeFeedProcessorState item : cfpCurrentStateSideCart) {
totalLag += item.getEstimatedLag();
}
assertThat(totalLag).isEqualTo(FEED_COUNT).as("Change Feed Processor Side Cart estimated total lag");
} finally {
safeDeleteCollection(createdFeedCollection);
safeDeleteCollection(createdLeaseCollection);
Thread.sleep(500);
}
}
    // Disabled: simulates a stale lease owner. The first processor runs briefly and stops;
    // the test then rewrites all lease documents to a bogus "TEMP_OWNER". A second processor
    // with aggressive renew/acquire/expiration intervals must steal the expired leases and
    // process all newly inserted documents.
    @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
    public void staledLeaseAcquiring() throws InterruptedException {
        final String ownerFirst = "Owner_First";
        final String ownerSecond = "Owner_Second";
        final String leasePrefix = "TEST";
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            // First owner: does no real work; only exists to create/own the leases initially.
            ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
                .hostName(ownerFirst)
                .handleAllVersionsAndDeletesChanges(docs -> {
                    log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                    log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                })
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection)
                .options(new ChangeFeedProcessorOptions()
                    .setLeasePrefix(leasePrefix)
                )
                .buildChangeFeedProcessor();
            // Second owner: short renew/acquire/expiration intervals so it can take over
            // the leases abandoned under "TEMP_OWNER".
            ChangeFeedProcessor changeFeedProcessorSecond = new ChangeFeedProcessorBuilder()
                .hostName(ownerSecond)
                .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                    log.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
                    for (ChangeFeedProcessorItem item : docs) {
                        processItem(item, receivedDocuments);
                    }
                    log.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerSecond);
                })
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection)
                .options(new ChangeFeedProcessorOptions()
                    .setLeaseRenewInterval(Duration.ofSeconds(10))
                    .setLeaseAcquireInterval(Duration.ofSeconds(5))
                    .setLeaseExpirationInterval(Duration.ofSeconds(20))
                    .setFeedPollDelay(Duration.ofSeconds(2))
                    .setLeasePrefix(leasePrefix)
                    .setMaxItemCount(10)
                    .setMaxScaleCount(0)
                )
                .buildChangeFeedProcessor();
            // Start the first processor and auto-stop it after 2 * timeout.
            changeFeedProcessorFirst
                .start()
                .subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .then(Mono.just(changeFeedProcessorFirst)
                    .delayElement(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .flatMap(value ->
                        changeFeedProcessorFirst.stop()
                            .subscribeOn(Schedulers.boundedElastic())
                            .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    ))
                .subscribe();
            try {
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            } catch (InterruptedException e) {
                throw new RuntimeException("Interrupted exception", e);
            }
            log.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
            // Overwrite every lease's Owner with a fake host, simulating a stale/dead owner.
            SqlParameter param = new SqlParameter();
            param.setName("@PartitionLeasePrefix");
            param.setValue(leasePrefix);
            SqlQuerySpec querySpec = new SqlQuerySpec(
                "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Collections.singletonList(param));
            CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
            createdLeaseCollection
                .queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
                .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
                .flatMap(doc -> {
                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                    leaseDocument.setOwner("TEMP_OWNER");
                    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                    return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                        .map(CosmosItemResponse::getItem);
                })
                .map(leaseDocument -> {
                    log.info("QueryItems after Change feed processor processing; found host {}", leaseDocument.getOwner());
                    return leaseDocument;
                })
                .blockLast();
            // The second owner should detect the expired leases and acquire them.
            changeFeedProcessorSecond
                .start()
                .subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
            Thread.sleep(4 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            List<InternalObjectNode> docDefList = new ArrayList<>();
            for (int i = 0; i < FEED_COUNT; i++) {
                docDefList.add(getDocumentDefinition());
            }
            bulkInsert(createdFeedCollection, docDefList, FEED_COUNT).blockLast();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            // Poll until the first instance has stopped and the second has started (bounded wait).
            long remainingWork = 10 * CHANGE_FEED_PROCESSOR_TIMEOUT;
            while (remainingWork > 0 && changeFeedProcessorFirst.isStarted() && !changeFeedProcessorSecond.isStarted()) {
                remainingWork -= 100;
                Thread.sleep(100);
            }
            assertThat(changeFeedProcessorSecond.isStarted()).as("Change Feed Processor instance is running").isTrue();
            // All inserted documents must be delivered to the second owner's handler.
            waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
            changeFeedProcessorSecond.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("DONE");
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // Allow background lease-release operations to drain before the next test.
            Thread.sleep(500);
        }
    }
    // Disabled: verifies a processor re-acquires leases whose Owner has been reset to null.
    // The processor starts, the test nulls out the Owner on its lease documents, inserts more
    // documents, and asserts the processor still receives FEED_COUNT documents.
    @Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
    public void ownerNullAcquiring() throws InterruptedException {
        final String ownerFirst = "Owner_First";
        final String leasePrefix = "TEST";
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            ChangeFeedProcessor changeFeedProcessorFirst = new ChangeFeedProcessorBuilder()
                .hostName(ownerFirst)
                .handleAllVersionsAndDeletesChanges(docs -> {
                    logger.info("START processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                    for (ChangeFeedProcessorItem item : docs) {
                        try {
                            // Slow the handler down to widen the window during which leases
                            // can be tampered with. Deliberately swallows the interrupt
                            // (test-only sleep).
                            Thread.sleep(1000);
                        } catch (InterruptedException ignored) {
                        }
                        processItem(item, receivedDocuments);
                    }
                    logger.info("END processing from thread {} using host {}", Thread.currentThread().getId(), ownerFirst);
                })
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection)
                .options(new ChangeFeedProcessorOptions()
                    .setLeasePrefix(leasePrefix)
                    .setLeaseRenewInterval(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .setLeaseAcquireInterval(Duration.ofMillis(5 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .setLeaseExpirationInterval(Duration.ofMillis(6 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .setFeedPollDelay(Duration.ofSeconds(5))
                )
                .buildChangeFeedProcessor();
            try {
                logger.info("Start more creating documents");
                List<InternalObjectNode> docDefList = new ArrayList<>();
                for (int i = 0; i < FEED_COUNT; i++) {
                    docDefList.add(getDocumentDefinition());
                }
                // Pipeline: insert docs -> start processor -> null out lease Owner -> insert more docs.
                bulkInsert(createdFeedCollection, docDefList, FEED_COUNT)
                    .last()
                    .flatMap(cosmosItemResponse -> {
                        logger.info("Start first Change feed processor");
                        return changeFeedProcessorFirst.start().subscribeOn(Schedulers.boundedElastic())
                            .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT));
                    })
                    .then(
                        Mono.just(changeFeedProcessorFirst)
                            .flatMap( value -> {
                                logger.info("Update leases for Change feed processor in thread {} using host {}", Thread.currentThread().getId(), "Owner_first");
                                try {
                                    Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
                                } catch (InterruptedException ignored) {
                                }
                                logger.info("QueryItems before Change feed processor processing");
                                SqlParameter param1 = new SqlParameter();
                                param1.setName("@PartitionLeasePrefix");
                                param1.setValue(leasePrefix);
                                SqlParameter param2 = new SqlParameter();
                                param2.setName("@Owner");
                                param2.setValue(ownerFirst);
                                SqlQuerySpec querySpec = new SqlQuerySpec(
                                    "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix) AND c.Owner=@Owner", Arrays.asList(param1, param2));
                                CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
                                return createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
                                    .flatMap(documentFeedResponse -> reactor.core.publisher.Flux.fromIterable(documentFeedResponse.getResults()))
                                    .flatMap(doc -> {
                                        // Simulate an orphaned lease by clearing its owner.
                                        ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                                        leaseDocument.setOwner(null);
                                        CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                                        return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                                            .map(CosmosItemResponse::getItem);
                                    })
                                    .map(leaseDocument -> {
                                        logger.info("QueryItems after Change feed processor processing; current Owner is'{}'", leaseDocument.getOwner());
                                        return leaseDocument;
                                    })
                                    .last()
                                    .flatMap(leaseDocument -> {
                                        logger.info("Start creating more documents");
                                        List<InternalObjectNode> docDefList1 = new ArrayList<>();
                                        for (int i = 0; i < FEED_COUNT; i++) {
                                            docDefList1.add(getDocumentDefinition());
                                        }
                                        return bulkInsert(createdFeedCollection, docDefList1, FEED_COUNT)
                                            .last();
                                    });
                            }))
                    .subscribe();
            } catch (Exception ex) {
                log.error("First change feed processor did not start in the expected time", ex);
                throw ex;
            }
            // Bounded wait for the processor to report started.
            long remainingWork = 20 * CHANGE_FEED_PROCESSOR_TIMEOUT;
            while (remainingWork > 0 && !changeFeedProcessorFirst.isStarted()) {
                remainingWork -= 100;
                Thread.sleep(100);
            }
            // Despite the nulled-out lease owners, all documents must still be delivered.
            waitToReceiveDocuments(receivedDocuments, 30 * CHANGE_FEED_PROCESSOR_TIMEOUT, FEED_COUNT);
            assertThat(changeFeedProcessorFirst.isStarted()).as("Change Feed Processor instance is running").isTrue();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            changeFeedProcessorFirst.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // Allow background lease-release operations to drain before the next test.
            Thread.sleep(500);
        }
    }
    // Disabled: verifies the processor recovers leases claimed by inactive (random, nonexistent)
    // owners. Short renew/acquire/expiration intervals let the processor re-steal the leases
    // and continue processing a second batch of documents.
    @Test(groups = { "emulator" }, timeOut = 20 * TIMEOUT, enabled = false)
    public void inactiveOwnersRecovery() throws InterruptedException {
        CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
        CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
        try {
            List<InternalObjectNode> createdDocuments = new ArrayList<>();
            Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
            String leasePrefix = "TEST";
            ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
                .hostName(hostName)
                .handleAllVersionsAndDeletesChanges(fullFidelityChangeFeedProcessorHandler(receivedDocuments))
                .feedContainer(createdFeedCollection)
                .leaseContainer(createdLeaseCollection)
                .options(new ChangeFeedProcessorOptions()
                    // Aggressive intervals so expired/foreign leases are reclaimed quickly.
                    .setLeaseRenewInterval(Duration.ofSeconds(1))
                    .setLeaseAcquireInterval(Duration.ofSeconds(1))
                    .setLeaseExpirationInterval(Duration.ofSeconds(5))
                    .setFeedPollDelay(Duration.ofSeconds(1))
                    .setLeasePrefix(leasePrefix)
                    .setMaxItemCount(100)
                    .setMaxScaleCount(0)
                    .setScheduler(Schedulers.newParallel("CFP parallel",
                        10 * Schedulers.DEFAULT_POOL_SIZE,
                        true))
                )
                .buildChangeFeedProcessor();
            try {
                changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                    .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                    .subscribe();
                Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            } catch (Exception ex) {
                log.error("Change feed processor did not start in the expected time", ex);
                throw ex;
            }
            // First batch: processed under normal ownership.
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments,2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
            log.info("Update leases with random owners");
            // Reassign all leases to random (inactive) owners.
            SqlParameter param1 = new SqlParameter();
            param1.setName("@PartitionLeasePrefix");
            param1.setValue(leasePrefix);
            SqlQuerySpec querySpec = new SqlQuerySpec(
                "SELECT * FROM c WHERE STARTSWITH(c.id, @PartitionLeasePrefix)", Arrays.asList(param1));
            CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
            createdLeaseCollection.queryItems(querySpec, cosmosQueryRequestOptions, InternalObjectNode.class).byPage()
                .flatMap(documentFeedResponse -> Flux.fromIterable(documentFeedResponse.getResults()))
                .flatMap(doc -> {
                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc);
                    leaseDocument.setOwner(RandomStringUtils.randomAlphabetic(10));
                    CosmosItemRequestOptions options = new CosmosItemRequestOptions();
                    return createdLeaseCollection.replaceItem(leaseDocument, leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), options)
                        .map(CosmosItemResponse::getItem);
                })
                .flatMap(leaseDocument -> createdLeaseCollection.readItem(leaseDocument.getId(), new PartitionKey(leaseDocument.getId()), InternalObjectNode.class))
                .map(doc -> {
                    ServiceItemLeaseV1 leaseDocument = ServiceItemLeaseV1.fromDocument(doc.getItem());
                    log.info("Change feed processor current Owner is'{}'", leaseDocument.getOwner());
                    return leaseDocument;
                })
                .blockLast();
            // Second batch: the processor must reclaim the leases and process these too.
            createdDocuments.clear();
            receivedDocuments.clear();
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            Thread.sleep(CHANGE_FEED_PROCESSOR_TIMEOUT);
        } finally {
            safeDeleteCollection(createdFeedCollection);
            safeDeleteCollection(createdLeaseCollection);
            // Allow background lease-release operations to drain before the next test.
            Thread.sleep(500);
        }
    }
// Verifies that a client-level end-to-end operation latency policy is suppressed for
// Change Feed Processor operations: the client below is configured with a 1 ms E2E timeout,
// which would fail every request if it were applied; successful processing therefore
// proves the CFP pipeline ignores the config.
@Test(groups = { "emulator" }, timeOut = 50 * CHANGE_FEED_PROCESSOR_TIMEOUT)
public void endToEndTimeoutConfigShouldBeSuppressed() throws InterruptedException {
    CosmosAsyncClient clientWithE2ETimeoutConfig = null;
    CosmosAsyncContainer createdFeedCollection = createFeedCollection(FEED_COLLECTION_THROUGHPUT);
    CosmosAsyncContainer createdLeaseCollection = createLeaseCollection(LEASE_COLLECTION_THROUGHPUT);
    try {
        // Intentionally unrealistic 1 ms end-to-end timeout on this client.
        clientWithE2ETimeoutConfig = this.getClientBuilder()
            .endToEndOperationLatencyPolicyConfig(new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofMillis(1)).build())
            .contentResponseOnWriteEnabled(true)
            .buildAsyncClient();
        // Resolve the same feed/lease containers through the timeout-configured client.
        CosmosAsyncDatabase testDatabase = clientWithE2ETimeoutConfig.getDatabase(this.createdDatabase.getId());
        CosmosAsyncContainer createdFeedCollectionDuplicate = testDatabase.getContainer(createdFeedCollection.getId());
        CosmosAsyncContainer createdLeaseCollectionDuplicate = testDatabase.getContainer(createdLeaseCollection.getId());
        List<InternalObjectNode> createdDocuments = new ArrayList<>();
        Map<String, ChangeFeedProcessorItem> receivedDocuments = new ConcurrentHashMap<>();
        ChangeFeedProcessorOptions changeFeedProcessorOptions = new ChangeFeedProcessorOptions();
        ChangeFeedProcessor changeFeedProcessor = new ChangeFeedProcessorBuilder()
            .options(changeFeedProcessorOptions)
            .hostName(hostName)
            .handleAllVersionsAndDeletesChanges((List<ChangeFeedProcessorItem> docs) -> {
                log.info("START processing from thread {}", Thread.currentThread().getId());
                for (ChangeFeedProcessorItem item : docs) {
                    processItem(item, receivedDocuments);
                }
                log.info("END processing from thread {}", Thread.currentThread().getId());
            })
            .feedContainer(createdFeedCollectionDuplicate)
            .leaseContainer(createdLeaseCollectionDuplicate)
            .buildChangeFeedProcessor();
        try {
            // Start asynchronously, then give the processor time to acquire leases before ingesting.
            changeFeedProcessor.start().subscribeOn(Schedulers.boundedElastic())
                .timeout(Duration.ofMillis(2 * CHANGE_FEED_PROCESSOR_TIMEOUT))
                .subscribe();
            logger.info("Starting ChangeFeed processor");
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Finished starting ChangeFeed processor");
            setupReadFeedDocuments(createdDocuments, receivedDocuments, createdFeedCollection, FEED_COUNT);
            logger.info("Set up read feed documents");
            // Allow the processor to drain the change feed before validating.
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            logger.info("Validating changes now");
            validateChangeFeedProcessing(changeFeedProcessor, createdDocuments, receivedDocuments, 10 * CHANGE_FEED_PROCESSOR_TIMEOUT);
            changeFeedProcessor.stop().subscribeOn(Schedulers.boundedElastic()).timeout(Duration.ofMillis(CHANGE_FEED_PROCESSOR_TIMEOUT)).subscribe();
            Thread.sleep(2 * CHANGE_FEED_PROCESSOR_TIMEOUT);
        } catch (Exception ex) {
            log.error("Change feed processor did not start and stopped in the expected time", ex);
            throw ex;
        }
    } finally {
        safeDeleteCollection(createdFeedCollection);
        safeDeleteCollection(createdLeaseCollection);
        // safeClose tolerates the null left when client construction failed.
        safeClose(clientWithE2ETimeoutConfig);
        Thread.sleep(500);
    }
}
@Test(groups = { "cfp-split" }, dataProvider = "contextTestConfigs", timeOut = 160 * CHANGE_FEED_PROCESSOR_TIMEOUT, enabled = false)
/**
 * Builds the change-feed delegate used by tests: records every received item into
 * {@code receivedDocuments} (keyed by document id) and logs the processing thread.
 */
private Consumer<List<ChangeFeedProcessorItem>> changeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
    return changes -> {
        long threadId = Thread.currentThread().getId();
        logger.info("START processing from thread in test {}", threadId);
        changes.forEach(change -> processItem(change, receivedDocuments));
        logger.info("END processing from thread {}", threadId);
    };
}
/**
 * Builds a context-aware change-feed delegate: records each item into
 * {@code receivedDocuments}, validates the {@link ChangeFeedProcessorContext},
 * and collects its lease token into {@code receivedLeaseTokensFromContext}.
 */
private BiConsumer<List<ChangeFeedProcessorItem>, ChangeFeedProcessorContext> changeFeedProcessorHandlerWithContext(
    Map<String, ChangeFeedProcessorItem> receivedDocuments, Set<String> receivedLeaseTokensFromContext) {
    return (changes, context) -> {
        long threadId = Thread.currentThread().getId();
        logger.info("START processing from thread in test {}", threadId);
        changes.forEach(change -> processItem(change, receivedDocuments));
        validateChangeFeedProcessorContext(context);
        processChangeFeedProcessorContext(context, receivedLeaseTokensFromContext);
        logger.info("END processing from thread {}", threadId);
    };
}
/**
 * Asserts that the given processor is running, owns all leases under {@code hostName},
 * and that every created document was received.
 *
 * @param changeFeedProcessor processor under test; must already be started
 * @param createdDocuments documents that were written to the feed container
 * @param receivedDocuments documents the handler has observed, keyed by id
 * @param sleepTime NOTE(review): currently unused — kept for signature compatibility
 *                  with existing callers; confirm whether a wait was intended here.
 * @throws InterruptedException declared for signature compatibility
 */
void validateChangeFeedProcessing(ChangeFeedProcessor changeFeedProcessor, List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, int sleepTime) throws InterruptedException {
    assertThat(changeFeedProcessor.isStarted()).as("Change Feed Processor instance is running").isTrue();
    List<ChangeFeedProcessorState> cfpCurrentState = changeFeedProcessor
        .getCurrentState()
        .map(state -> {
            try {
                log.info(OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(state));
            } catch (JsonProcessingException ex) {
                log.error("Unexpected", ex);
            }
            return state;
        })
        .block();
    // BUGFIX: AssertJ's as() must be called BEFORE the assertion method; previously the
    // description was chained after isNotNull()/isEqualTo() and silently discarded.
    assertThat(cfpCurrentState).as("Change Feed Processor current state").isNotNull();
    for (ChangeFeedProcessorState item : cfpCurrentState) {
        assertThat(item.getHostName()).as("Change Feed Processor ownership").isEqualTo(hostName);
    }
    assertThat(receivedDocuments.size()).isEqualTo(FEED_COUNT);
    for (InternalObjectNode item : createdDocuments) {
        assertThat(receivedDocuments.containsKey(item.getId())).as("Document with getId: " + item.getId()).isTrue();
    }
}
/** Asserts that the context carries a non-null lease token. */
void validateChangeFeedProcessorContext(ChangeFeedProcessorContext changeFeedProcessorContext) {
    assertThat(changeFeedProcessorContext.getLeaseToken()).isNotNull();
}
/**
 * Builds the full-fidelity change-feed delegate: records every received item into
 * {@code receivedDocuments} (keyed by document id) and logs the processing thread.
 */
private Consumer<List<ChangeFeedProcessorItem>> fullFidelityChangeFeedProcessorHandler(Map<String, ChangeFeedProcessorItem> receivedDocuments) {
    return changes -> {
        long threadId = Thread.currentThread().getId();
        log.info("START processing from thread in test {}", threadId);
        changes.forEach(change -> processItem(change, receivedDocuments));
        log.info("END processing from thread {}", threadId);
    };
}
/**
 * Polls until {@code receivedDocuments} holds at least {@code count} entries or the
 * timeout budget is exhausted, then asserts that all documents arrived in time.
 *
 * @param receivedDocuments map populated by the change-feed handler
 * @param timeoutInMillisecond total time budget for the wait
 * @param count number of documents expected
 * @throws InterruptedException if the polling sleep is interrupted
 */
private void waitToReceiveDocuments(Map<String, ChangeFeedProcessorItem> receivedDocuments, long timeoutInMillisecond, long count) throws InterruptedException {
    // BUGFIX: the budget was decremented by 100 ms per iteration while sleeping 200 ms,
    // so the loop actually waited ~2x the requested timeout. Decrement and sleep now use
    // the same interval so the budget is honored.
    final long pollIntervalInMillis = 100;
    long remainingWork = timeoutInMillisecond;
    while (remainingWork > 0 && receivedDocuments.size() < count) {
        remainingWork -= pollIntervalInMillis;
        Thread.sleep(pollIntervalInMillis);
    }
    assertThat(remainingWork > 0).as("Failed to receive all the feed documents").isTrue();
}
// Class-level setup: builds the shared async client and resolves the shared test database.
// Paired with afterClass(), which closes the client.
@BeforeClass(groups = { "emulator" }, timeOut = SETUP_TIMEOUT)
public void before_ChangeFeedProcessorTest() {
    client = getClientBuilder().buildAsyncClient();
    createdDatabase = getSharedCosmosDatabase(client);
}
// Class-level teardown: releases the shared client created in before_ChangeFeedProcessorTest().
// alwaysRun ensures the client is closed even when setup or a test failed.
@AfterClass(groups = { "emulator" }, timeOut = 2 * SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    safeClose(client);
}
/**
 * Bulk-inserts {@code count} generated documents into {@code feedCollection}, appends the
 * created documents to {@code createdDocuments}, and waits for replicas to catch up.
 * {@code receivedDocuments} is accepted for signature symmetry with callers; it is not
 * modified here.
 */
private void setupReadFeedDocuments(List<InternalObjectNode> createdDocuments, Map<String, ChangeFeedProcessorItem> receivedDocuments, CosmosAsyncContainer feedCollection, long count) {
    List<InternalObjectNode> definitions = new ArrayList<>();
    for (long created = 0; created < count; created++) {
        InternalObjectNode definition = getDocumentDefinition();
        definitions.add(definition);
        logger.info("Adding the following item to bulk list: {}", definition);
    }
    createdDocuments.addAll(bulkInsertBlocking(feedCollection, definitions));
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
/**
 * Bulk-inserts {@code count} generated documents into {@code feedCollection}, appends the
 * created documents to {@code createdDocuments}, and waits for replicas to catch up.
 */
private void createReadFeedDocuments(List<InternalObjectNode> createdDocuments, CosmosAsyncContainer feedCollection, long count) {
    List<InternalObjectNode> definitions = new ArrayList<>();
    for (long remaining = count; remaining > 0; remaining--) {
        definitions.add(getDocumentDefinition());
    }
    createdDocuments.addAll(bulkInsertBlocking(feedCollection, definitions));
    waitIfNeededForReplicasToCatchUp(getClientBuilder());
}
/**
 * Generates a test document with a fresh UUID used as both id and partition key,
 * plus a fixed "sgmts" payload.
 */
private InternalObjectNode getDocumentDefinition() {
    String uuid = UUID.randomUUID().toString();
    String json = String.format("{ "
        + "\"id\": \"%s\", "
        + "\"mypk\": \"%s\", "
        + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]"
        + "}"
        , uuid, uuid);
    return new InternalObjectNode(json);
}
/** Creates the feed container (full-fidelity change feed enabled) with the given throughput. */
private CosmosAsyncContainer createFeedCollection(int provisionedThroughput) {
    return createCollection(
        createdDatabase,
        getCollectionDefinitionWithFullFidelity(),
        new CosmosContainerRequestOptions(),
        provisionedThroughput);
}
/** Creates a uniquely named lease container partitioned on /id with the given throughput. */
private CosmosAsyncContainer createLeaseCollection(int provisionedThroughput) {
    CosmosContainerProperties leaseContainerDefinition =
        new CosmosContainerProperties("leases_" + UUID.randomUUID(), "/id");
    return createCollection(
        createdDatabase,
        leaseContainerDefinition,
        new CosmosContainerRequestOptions(),
        provisionedThroughput);
}
/** Creates a uniquely named lease-monitor container partitioned on /id with the given throughput. */
private CosmosAsyncContainer createLeaseMonitorCollection(int provisionedThroughput) {
    CosmosContainerProperties monitorContainerDefinition =
        new CosmosContainerProperties("monitor_" + UUID.randomUUID(), "/id");
    return createCollection(
        createdDatabase,
        monitorContainerDefinition,
        new CosmosContainerRequestOptions(),
        provisionedThroughput);
}
/**
 * Builds the delegate that observes the LEASE container's change feed, tracking every lease
 * document in {@code leaseStateMonitor} and flagging continuation tokens that are malformed
 * or that fail to advance past the recorded parent token after a split.
 */
private Consumer<List<ChangeFeedProcessorItem>> leasesChangeFeedProcessorHandler(LeaseStateMonitor leaseStateMonitor) {
    return docs -> {
        log.info("LEASES processing from thread in test {}", Thread.currentThread().getId());
        for (ChangeFeedProcessorItem item : docs) {
            try {
                log
                    .debug("LEASE RECEIVED {}", OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(item));
            } catch (JsonProcessingException e) {
                log.error("Failure in processing json [{}]", e.getMessage(), e);
            }
            JsonNode leaseToken = item.getCurrent().get("LeaseToken");
            // Only documents that carry a LeaseToken are lease documents; others are ignored.
            if (leaseToken != null) {
                JsonNode continuationTokenNode = item.getCurrent().get("ContinuationToken");
                if (continuationTokenNode == null) {
                    log.error("Found invalid lease document");
                    leaseStateMonitor.isContinuationTokenAdvancing = false;
                }
                else {
                    log.info("LEASE {} with continuation {}", leaseToken.asText(), continuationTokenNode.asText());
                    if (leaseStateMonitor.isAfterLeaseInitialization) {
                        String value = continuationTokenNode.asText().replaceAll("[^0-9]", "");
                        if (value.isEmpty()) {
                            log.error("Found unexpected continuation token that does not conform to the expected format");
                            leaseStateMonitor.isContinuationTokenAdvancing = false;
                        }
                        else {
                            // BUGFIX: previously Long.parseLong ran even when value was empty,
                            // throwing NumberFormatException inside the handler.
                            long continuationToken = Long.parseLong(value);
                            if (leaseStateMonitor.parentContinuationToken > continuationToken) {
                                // BUGFIX: the message had two {} placeholders but no arguments.
                                log.error("Found unexpected continuation token that did not advance after the split; parent: {}, current: {}",
                                    leaseStateMonitor.parentContinuationToken,
                                    continuationToken);
                                leaseStateMonitor.isContinuationTokenAdvancing = false;
                            }
                        }
                    }
                }
                leaseStateMonitor.receivedLeases.put(item.getCurrent().get("id").asText(), item.getCurrent());
            }
        }
        log.info("LEASES processing from thread {}", Thread.currentThread().getId());
    };
}
// Records a received change-feed item into the shared map, keyed by the document's "id".
// synchronized because handler delegates may be invoked from multiple processor threads.
// NOTE(review): assumes item.getCurrent() always carries an "id" node — an item without one
// would NPE here; confirm against the feed container's document shape.
private static synchronized void processItem(ChangeFeedProcessorItem item, Map<String, ChangeFeedProcessorItem> receivedDocuments) {
    log.info("RECEIVED {}", item);
    receivedDocuments.put(item.getCurrent().get("id").asText(), item);
}
/**
 * Fails the test if the context or its lease token is missing/empty; otherwise records
 * the lease token into the shared set. synchronized to allow concurrent handler threads.
 */
private static synchronized void processChangeFeedProcessorContext(
    ChangeFeedProcessorContext context,
    Set<String> receivedLeaseTokens) {
    if (context == null) {
        fail("The context cannot be null.");
    }
    String leaseToken = context.getLeaseToken();
    if (leaseToken == null || leaseToken.isEmpty()) {
        fail("The lease token cannot be null or empty.");
    }
    receivedLeaseTokens.add(leaseToken);
}
// Shared mutable state observed by the lease-container change-feed handler; fields are
// volatile / concurrent because the handler and the test thread access them concurrently.
class LeaseStateMonitor {
    // Lease documents seen so far, keyed by lease document id.
    public Map<String, JsonNode> receivedLeases = new ConcurrentHashMap<>();
    // Set true by the test once initial lease acquisition is done; enables token checks.
    public volatile boolean isAfterLeaseInitialization = false;
    // Set true by the test once the partition split has completed.
    public volatile boolean isAfterSplits = false;
    // Continuation token of the parent partition, recorded before the split.
    public volatile long parentContinuationToken = 0;
    // Cleared by the handler when a continuation token is malformed or fails to advance.
    public volatile boolean isContinuationTokenAdvancing = true;
}
} |
Junit 5 has API to assert thrown exception, ```java assertThrows( MyException.class, () -> myObject.doThing(), "Expected doThing() to throw, but it didn't" ); ``` | public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
} | credential.getTokenSync(request); | public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
} | class AzureCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
}
} | class AzureCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
}
} |
This can be replaced with Junit 5 API: ```java assertThrows( MyException.class, () -> myObject.doThing(), "Expected doThing() to throw, but it didn't" ); ``` | public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
} | credential.getTokenSync(request); | public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
} | class AzureCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} | class AzureCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureCliCredential credential = new AzureCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} |
This can be replaced with Junit 5 API: ```java assertThrows( MyException.class, () -> myObject.doThing(), "Expected doThing() to throw, but it didn't" ); ``` | public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
} | credential.getTokenSync(request); | public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
} | class AzureDeveloperCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
}
} | class AzureDeveloperCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
}
} |
This can be replaced with Junit 5 API: ```java assertThrows( MyException.class, () -> myObject.doThing(), "Expected doThing() to throw, but it didn't" ); ``` | public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
} | credential.getTokenSync(request); | public void testInvalidTenantFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
} | class AzureDeveloperCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
try {
credential.getTokenSync(request);
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
}
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} | class AzureDeveloperCliCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidTenantFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope").setTenantId("tenant" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
}
@ParameterizedTest
@MethodSource("invalidCharacters")
public void testInvalidScopeFromRequestSync(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzureDeveloperCliCredential credential = new AzureDeveloperCliCredentialBuilder().build();
assertThrows(IllegalArgumentException.class, () -> credential.getTokenSync(request));
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} |
possible negative value? | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(sessionRetryOptions, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions) ==
CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion = this.waitTimeTimeoutHelper.getRemainingTime(
sessionRetryOptionsAccessor.getMinInRegionRetryTime(this.sessionRetryOptions)
);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | sessionRetryOptionsAccessor.getMinInRegionRetryTime(this.sessionRetryOptions) | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(regionSwitchHint, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (regionSwitchHint == CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion =
this.waitTimeTimeoutHelper.getRemainingTime(minInRegionRetryTime);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final SessionRetryOptions sessionRetryOptions;
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.retryContext = retryContext;
this.sessionRetryOptions = sessionRetryOptions;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
if (backoff.compareTo(remainingTime) > 0) {
return remainingTime;
}
return backoff;
}
private boolean shouldRetryLocally(SessionRetryOptions sessionRetryOptions, int sessionTokenMismatchRetryAttempts) {
if (sessionRetryOptions == null) {
return true;
}
CosmosRegionSwitchHint regionSwitchHint = sessionRetryOptionsAccessor
.getRegionSwitchHint(sessionRetryOptions);
if (regionSwitchHint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
return true;
}
return sessionTokenMismatchRetryAttempts <= (this.maxRetryAttemptsInCurrentRegion.get() - 1);
}
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final CosmosRegionSwitchHint regionSwitchHint;
private final Duration minInRegionRetryTime;
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
if (sessionRetryOptions != null) {
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.regionSwitchHint = sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions);
this.minInRegionRetryTime = sessionRetryOptionsAccessor.getMinInRegionRetryTime(sessionRetryOptions);
} else {
this.maxRetryAttemptsInCurrentRegion = null;
this.regionSwitchHint = CosmosRegionSwitchHint.LOCAL_REGION_PREFERRED;
this.minInRegionRetryTime = null;
}
this.retryContext = retryContext;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
if (backoff.compareTo(remainingTime) > 0) {
return remainingTime;
}
return backoff;
}
private boolean shouldRetryLocally(CosmosRegionSwitchHint regionSwitchHint, int sessionTokenMismatchRetryAttempts) {
if (regionSwitchHint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
return true;
}
return sessionTokenMismatchRetryAttempts <= (this.maxRetryAttemptsInCurrentRegion.get() - 1);
}
} |
This test is missing Sync test case. | public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzurePowerShellCredential credential = new AzurePowerShellCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
} | .verify(); | public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzurePowerShellCredential credential = new AzurePowerShellCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
} | class AzurePowerShellCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", "'", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} | class AzurePowerShellCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", "'", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} |
`AzurePowerShellCredential` doesn't have a `getTokenSync`. (Maybe it should!) | public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzurePowerShellCredential credential = new AzurePowerShellCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
} | .verify(); | public void testInvalidScopeFromRequest(String invalidCharacter) {
TokenRequestContext request = new TokenRequestContext().addScopes("scope" + invalidCharacter);
AzurePowerShellCredential credential = new AzurePowerShellCredentialBuilder().build();
StepVerifier.create(credential.getToken(request))
.expectErrorMatches(e -> e instanceof IllegalArgumentException)
.verify();
} | class AzurePowerShellCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", "'", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} | class AzurePowerShellCredentialNegativeTest {
static Stream<String> invalidCharacters() {
return Stream.of("|", "&", "'", ";");
}
@ParameterizedTest
@MethodSource("invalidCharacters")
} |
hmm, I am wonder do we really need the maxRetryCount, why the minInRegionRetryTime is not enough( feel it could be hard for the customers to set all these values properly - like if maxRetryCount being too large, then the request could stuck in local region much longer than expected. - If the maxRetryCount being too low, then we may not hit the minInRegionRetryTime) | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(sessionRetryOptions, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions) ==
CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion = this.waitTimeTimeoutHelper.getRemainingTime(
sessionRetryOptionsAccessor.getMinInRegionRetryTime(this.sessionRetryOptions)
);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | && attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) { | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(regionSwitchHint, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (regionSwitchHint == CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion =
this.waitTimeTimeoutHelper.getRemainingTime(minInRegionRetryTime);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final SessionRetryOptions sessionRetryOptions;
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.retryContext = retryContext;
this.sessionRetryOptions = sessionRetryOptions;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
if (backoff.compareTo(remainingTime) > 0) {
return remainingTime;
}
return backoff;
}
private boolean shouldRetryLocally(SessionRetryOptions sessionRetryOptions, int sessionTokenMismatchRetryAttempts) {
if (sessionRetryOptions == null) {
return true;
}
CosmosRegionSwitchHint regionSwitchHint = sessionRetryOptionsAccessor
.getRegionSwitchHint(sessionRetryOptions);
if (regionSwitchHint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
return true;
}
return sessionTokenMismatchRetryAttempts <= (this.maxRetryAttemptsInCurrentRegion.get() - 1);
}
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final CosmosRegionSwitchHint regionSwitchHint;
private final Duration minInRegionRetryTime;
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
if (sessionRetryOptions != null) {
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.regionSwitchHint = sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions);
this.minInRegionRetryTime = sessionRetryOptionsAccessor.getMinInRegionRetryTime(sessionRetryOptions);
} else {
this.maxRetryAttemptsInCurrentRegion = null;
this.regionSwitchHint = CosmosRegionSwitchHint.LOCAL_REGION_PREFERRED;
this.minInRegionRetryTime = null;
}
this.retryContext = retryContext;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
if (backoff.compareTo(remainingTime) > 0) {
return remainingTime;
}
return backoff;
}
private boolean shouldRetryLocally(CosmosRegionSwitchHint regionSwitchHint, int sessionTokenMismatchRetryAttempts) {
if (regionSwitchHint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
return true;
}
return sessionTokenMismatchRetryAttempts <= (this.maxRetryAttemptsInCurrentRegion.get() - 1);
}
} |
I think there is a misunderstanding - the if statement will only be entered for the last retry. Goal here is to allow avoiding high number of retries to "fill" the minRetryTime (which has to be about the replication latency between regions). | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(sessionRetryOptions, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions) ==
CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion = this.waitTimeTimeoutHelper.getRemainingTime(
sessionRetryOptionsAccessor.getMinInRegionRetryTime(this.sessionRetryOptions)
);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | && attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) { | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(regionSwitchHint, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (regionSwitchHint == CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion =
this.waitTimeTimeoutHelper.getRemainingTime(minInRegionRetryTime);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final SessionRetryOptions sessionRetryOptions;
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.retryContext = retryContext;
this.sessionRetryOptions = sessionRetryOptions;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
if (backoff.compareTo(remainingTime) > 0) {
return remainingTime;
}
return backoff;
}
private boolean shouldRetryLocally(SessionRetryOptions sessionRetryOptions, int sessionTokenMismatchRetryAttempts) {
if (sessionRetryOptions == null) {
return true;
}
CosmosRegionSwitchHint regionSwitchHint = sessionRetryOptionsAccessor
.getRegionSwitchHint(sessionRetryOptions);
if (regionSwitchHint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
return true;
}
return sessionTokenMismatchRetryAttempts <= (this.maxRetryAttemptsInCurrentRegion.get() - 1);
}
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final CosmosRegionSwitchHint regionSwitchHint;
private final Duration minInRegionRetryTime;
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
if (sessionRetryOptions != null) {
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.regionSwitchHint = sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions);
this.minInRegionRetryTime = sessionRetryOptionsAccessor.getMinInRegionRetryTime(sessionRetryOptions);
} else {
this.maxRetryAttemptsInCurrentRegion = null;
this.regionSwitchHint = CosmosRegionSwitchHint.LOCAL_REGION_PREFERRED;
this.minInRegionRetryTime = null;
}
this.retryContext = retryContext;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
if (backoff.compareTo(remainingTime) > 0) {
return remainingTime;
}
return backoff;
}
private boolean shouldRetryLocally(CosmosRegionSwitchHint regionSwitchHint, int sessionTokenMismatchRetryAttempts) {
if (regionSwitchHint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
return true;
}
return sessionTokenMismatchRetryAttempts <= (this.maxRetryAttemptsInCurrentRegion.get() - 1);
}
} |
Not a problem - will only ever be used when it is larger than the effectiveBackoff | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(sessionRetryOptions, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions) ==
CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion = this.waitTimeTimeoutHelper.getRemainingTime(
sessionRetryOptionsAccessor.getMinInRegionRetryTime(this.sessionRetryOptions)
);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | sessionRetryOptionsAccessor.getMinInRegionRetryTime(this.sessionRetryOptions) | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(regionSwitchHint, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (regionSwitchHint == CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion =
this.waitTimeTimeoutHelper.getRemainingTime(minInRegionRetryTime);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final SessionRetryOptions sessionRetryOptions;
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.retryContext = retryContext;
this.sessionRetryOptions = sessionRetryOptions;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
if (backoff.compareTo(remainingTime) > 0) {
return remainingTime;
}
return backoff;
}
private boolean shouldRetryLocally(SessionRetryOptions sessionRetryOptions, int sessionTokenMismatchRetryAttempts) {
if (sessionRetryOptions == null) {
return true;
}
CosmosRegionSwitchHint regionSwitchHint = sessionRetryOptionsAccessor
.getRegionSwitchHint(sessionRetryOptions);
if (regionSwitchHint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
return true;
}
return sessionTokenMismatchRetryAttempts <= (this.maxRetryAttemptsInCurrentRegion.get() - 1);
}
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final CosmosRegionSwitchHint regionSwitchHint;
private final Duration minInRegionRetryTime;
// Creates the retry policy used for READ_SESSION_NOT_AVAILABLE (404/1002) responses.
// 'sessionRetryOptions' may be null; region-switch fields then fall back to
// local-region-preferred defaults and the in-region limits stay null (they are only
// read behind a REMOTE_REGION_PREFERRED check).
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
// Overall time budget for all retries of a single operation.
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()))-;
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
// First retry uses the configured initial backoff; it grows by BACKOFF_MULTIPLIER afterwards.
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
if (sessionRetryOptions != null) {
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.regionSwitchHint = sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions);
this.minInRegionRetryTime = sessionRetryOptionsAccessor.getMinInRegionRetryTime(sessionRetryOptions);
} else {
// No options supplied: never force a region switch; limits intentionally left null.
this.maxRetryAttemptsInCurrentRegion = null;
this.regionSwitchHint = CosmosRegionSwitchHint.LOCAL_REGION_PREFERRED;
this.minInRegionRetryTime = null;
}
this.retryContext = retryContext;
}
@Override
public RetryContext getRetryContext() {
    // Exposes the retry context for diagnostics/instrumentation.
    // FIX: removed duplicated @Override — repeating it does not compile.
    return this.retryContext;
}
/** Caps the proposed backoff at the remaining overall wait-time budget. */
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
    // min(backoff, remainingTime) under Duration's natural ordering.
    return remainingTime.compareTo(backoff) < 0 ? remainingTime : backoff;
}
/** Decides whether the next session-token-mismatch retry may stay in the current region. */
private boolean shouldRetryLocally(CosmosRegionSwitchHint regionSwitchHint, int sessionTokenMismatchRetryAttempts) {
    // Only an explicit remote-region preference ever caps local retries; the
    // short-circuit keeps maxRetryAttemptsInCurrentRegion unread otherwise.
    boolean remotePreferred = regionSwitchHint == CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED;
    return !remotePreferred
        || sessionTokenMismatchRetryAttempts <= this.maxRetryAttemptsInCurrentRegion.get() - 1;
}
} |
Discussed and resolved offline | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(sessionRetryOptions, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions) ==
CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion = this.waitTimeTimeoutHelper.getRemainingTime(
sessionRetryOptionsAccessor.getMinInRegionRetryTime(this.sessionRetryOptions)
);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | && attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) { | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (!(e instanceof CosmosException)) {
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
CosmosException cosmosException = (CosmosException)e;
if (cosmosException.getStatusCode() != HttpConstants.StatusCodes.NOTFOUND ||
cosmosException.getSubStatusCode() != HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE) {
LOGGER.debug(
"SessionTokenMismatchRetryPolicy not retrying because StatusCode or SubStatusCode not found.");
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
if (this.waitTimeTimeoutHelper.isElapsed()) {
LOGGER.warn(
"SessionTokenMismatchRetryPolicy not retrying because it has exceeded " +
"the time limit. Retry count = {}",
this.retryCount);
return Mono.just(ShouldRetryResult.noRetry());
}
if (!shouldRetryLocally(regionSwitchHint, retryCount.get())) {
LOGGER.debug("SessionTokenMismatchRetryPolicy not retrying because it a retry attempt for the current region and " +
"fallback to a different region is preferred ");
return Mono.just(ShouldRetryResult.noRetry());
}
Duration effectiveBackoff = Duration.ZERO;
int attempt = this.retryCount.getAndIncrement();
if (attempt > 0) {
effectiveBackoff = getEffectiveBackoff(
this.currentBackoff,
this.waitTimeTimeoutHelper.getRemainingTime());
this.currentBackoff = getEffectiveBackoff(
Duration.ofMillis(this.currentBackoff.toMillis() * BACKOFF_MULTIPLIER),
this.maximumBackoff);
}
if (regionSwitchHint == CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED
&& attempt >= (this.maxRetryAttemptsInCurrentRegion.get() - 1)) {
Duration remainingMinRetryTimeInLocalRegion =
this.waitTimeTimeoutHelper.getRemainingTime(minInRegionRetryTime);
if (remainingMinRetryTimeInLocalRegion.compareTo(effectiveBackoff) > 0) {
effectiveBackoff = remainingMinRetryTimeInLocalRegion;
}
}
LOGGER.debug(
"SessionTokenMismatchRetryPolicy will retry. Retry count = {}. Backoff time = {} ms",
this.retryCount,
effectiveBackoff.toMillis());
return Mono.just(ShouldRetryResult.retryAfter(effectiveBackoff));
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final SessionRetryOptions sessionRetryOptions;
// Creates the retry policy used for READ_SESSION_NOT_AVAILABLE (404/1002) responses.
// FIX: 'sessionRetryOptions' may legitimately be null — shouldRetryLocally handles
// that case explicitly — so the accessor must not be invoked unconditionally here;
// doing so would dereference the null options during construction.
public SessionTokenMismatchRetryPolicy(
    RetryContext retryContext,
    SessionRetryOptions sessionRetryOptions) {
    // Overall time budget for all retries of a single operation.
    this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
    this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
    this.retryCount = new AtomicInteger();
    this.retryCount.set(0);
    // First retry uses the configured initial backoff; it grows by BACKOFF_MULTIPLIER afterwards.
    this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
    // Null options => no in-region cap; the field is only read when a remote-region
    // preference is configured, which requires non-null options.
    this.maxRetryAttemptsInCurrentRegion = sessionRetryOptions != null
        ? new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions))
        : null;
    this.retryContext = retryContext;
    this.sessionRetryOptions = sessionRetryOptions;
}
@Override
public RetryContext getRetryContext() {
    // Exposes the retry context for diagnostics/instrumentation.
    // FIX: removed duplicated @Override — repeating it does not compile.
    return this.retryContext;
}
/** Caps the proposed backoff at the remaining overall wait-time budget. */
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
    // min(backoff, remainingTime) under Duration's natural ordering.
    return remainingTime.compareTo(backoff) < 0 ? remainingTime : backoff;
}
/**
 * Decides whether the next session-token-mismatch retry may stay in the current region.
 *
 * @param sessionRetryOptions user-provided retry options; may be {@code null}, in which
 *                            case local retries are never capped
 * @param sessionTokenMismatchRetryAttempts attempts already made in this region
 * @return {@code true} to retry locally, {@code false} to stop so another region can be tried
 */
private boolean shouldRetryLocally(SessionRetryOptions sessionRetryOptions, int sessionTokenMismatchRetryAttempts) {
    if (sessionRetryOptions == null) {
        // No options configured — region-switch policy does not apply.
        return true;
    }
    CosmosRegionSwitchHint hint = sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions);
    if (hint != CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED) {
        return true;
    }
    // Remote region preferred: stop local retries once the in-region budget is used up.
    int lastAllowedAttempt = this.maxRetryAttemptsInCurrentRegion.get() - 1;
    return sessionTokenMismatchRetryAttempts <= lastAllowedAttempt;
}
} | class SessionTokenMismatchRetryPolicy implements IRetryPolicy {
private final static ImplementationBridgeHelpers.CosmosSessionRetryOptionsHelper.CosmosSessionRetryOptionsAccessor
sessionRetryOptionsAccessor = ImplementationBridgeHelpers
.CosmosSessionRetryOptionsHelper
.getCosmosSessionRetryOptionsAccessor();
private final static Logger LOGGER = LoggerFactory.getLogger(SessionTokenMismatchRetryPolicy.class);
private static final int BACKOFF_MULTIPLIER = 5;
private final Duration maximumBackoff;
private final TimeoutHelper waitTimeTimeoutHelper;
private final AtomicInteger retryCount;
private Duration currentBackoff;
private RetryContext retryContext;
private final AtomicInteger maxRetryAttemptsInCurrentRegion;
private final CosmosRegionSwitchHint regionSwitchHint;
private final Duration minInRegionRetryTime;
// Creates the retry policy used for READ_SESSION_NOT_AVAILABLE (404/1002) responses.
// 'sessionRetryOptions' may be null; region-switch fields then fall back to
// local-region-preferred defaults and the in-region limits stay null (they are only
// read behind a REMOTE_REGION_PREFERRED check).
public SessionTokenMismatchRetryPolicy(
RetryContext retryContext,
SessionRetryOptions sessionRetryOptions) {
// Overall time budget for all retries of a single operation.
this.waitTimeTimeoutHelper = new TimeoutHelper(Duration.ofMillis(Configs.getSessionTokenMismatchDefaultWaitTimeInMs()));
this.maximumBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchMaximumBackoffTimeInMs());
this.retryCount = new AtomicInteger();
this.retryCount.set(0);
// First retry uses the configured initial backoff; it grows by BACKOFF_MULTIPLIER afterwards.
this.currentBackoff = Duration.ofMillis(Configs.getSessionTokenMismatchInitialBackoffTimeInMs());
if (sessionRetryOptions != null) {
this.maxRetryAttemptsInCurrentRegion =
new AtomicInteger(sessionRetryOptionsAccessor.getMaxInRegionRetryCount(sessionRetryOptions));
this.regionSwitchHint = sessionRetryOptionsAccessor.getRegionSwitchHint(sessionRetryOptions);
this.minInRegionRetryTime = sessionRetryOptionsAccessor.getMinInRegionRetryTime(sessionRetryOptions);
} else {
// No options supplied: never force a region switch; limits intentionally left null.
this.maxRetryAttemptsInCurrentRegion = null;
this.regionSwitchHint = CosmosRegionSwitchHint.LOCAL_REGION_PREFERRED;
this.minInRegionRetryTime = null;
}
this.retryContext = retryContext;
}
@Override
public RetryContext getRetryContext() {
    // Exposes the retry context for diagnostics/instrumentation.
    // FIX: removed duplicated @Override — repeating it does not compile.
    return this.retryContext;
}
/** Caps the proposed backoff at the remaining overall wait-time budget. */
private static Duration getEffectiveBackoff(Duration backoff, Duration remainingTime) {
    // min(backoff, remainingTime) under Duration's natural ordering.
    return remainingTime.compareTo(backoff) < 0 ? remainingTime : backoff;
}
/** Decides whether the next session-token-mismatch retry may stay in the current region. */
private boolean shouldRetryLocally(CosmosRegionSwitchHint regionSwitchHint, int sessionTokenMismatchRetryAttempts) {
    // Only an explicit remote-region preference ever caps local retries; the
    // short-circuit keeps maxRetryAttemptsInCurrentRegion unread otherwise.
    boolean remotePreferred = regionSwitchHint == CosmosRegionSwitchHint.REMOTE_REGION_PREFERRED;
    return !remotePreferred
        || sessionTokenMismatchRetryAttempts <= this.maxRetryAttemptsInCurrentRegion.get() - 1;
}
} |
I missed this in the earlier PR, but need to use `getConnectionString()` here, otherwise env var isn't considered | private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
StatsbeatModule statsbeatModule = null;
if (connectionString != null) {
statsbeatModule = new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
return statsbeatModule;
} | if (connectionString != null) { | private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
// Config keys overriding statsbeat reporting intervals. FIX: removed the trailing
// space inside each key literal — with it, the configProperties.getLong(...) lookups
// in startStatsbeatModule could never match the intended property name.
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
// Intentionally empty: all configuration happens through the fluent setters.
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
// Fail fast: the builder freezes after the first build*() call, so later changes
// could never reach the already-built exporters.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
// Null policies are rejected eagerly rather than failing inside the pipeline later.
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
// Parse eagerly so an invalid value fails here rather than at export time.
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
    // FIX: use the production factory DefaultConfigProperties.create(...), consistent
    // with buildMetricExporter()/buildLogRecordExporter(); createForTest(...) is a
    // test-only entry point and must not be used on this code path.
    ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(defaultConfig);
    return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
// Freezing on first use guarantees pipeline/exporter state is built exactly once.
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
    ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(defaultConfig);
    // FIX: reuse the same config instance instead of constructing a second identical
    // one, matching buildTraceExporter()/buildMetricExporter().
    return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
// Route all three signals to this exporter and mark that the exporter builder
// (rather than pure autoconfigure) is driving the installation.
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
// Each customizer swaps the provider's marker exporter for a real one; the
// internalBuildAndFreeze call is idempotent (guarded by 'frozen').
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, config) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(config);
spanExporter = buildTraceExporter(config);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, config) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(config);
metricExporter = buildMetricExporter(config);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, config) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(config);
logRecordExporter = buildLogRecordExporter(config);
}
return logRecordExporter;
});
// Drop the SDK's internal trace/log instrument metrics to avoid self-telemetry noise.
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
// Builds the shared pipeline/exporter state exactly once and freezes the builder.
// NOTE(review): not synchronized — appears to assume single-threaded configuration;
// confirm callers cannot race here.
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
// Statsbeat is only created when a connection string is configured (see
// initStatsbeatModule), hence the null check before starting it.
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
if (statsbeatModule != null) {
startStatsbeatModule(statsbeatModule, configProperties);
}
frozen = true;
}
}
// Internal variant: assumes internalBuildAndFreeze has already populated builtTelemetryItemExporter.
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
// Internal variant: also starts the 15-minute heartbeat alongside the metric exporter.
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
// No optional statsbeat features are reported from this exporter today.
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
// Derives the statsbeat destination from the user's connection string (no overrides).
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
// Internal variant: assumes internalBuildAndFreeze has already populated builtTelemetryItemExporter.
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
// Span->telemetry mapper; the two lambdas disable event/exception suppression hooks.
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
// Returns a populator applied to every outgoing telemetry item: connection string,
// resource, SDK-version tag, and role name/instance derived from the resource.
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
// Resolved once here so every item reuses the same parsed connection string.
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
// Resolves the connection string: an explicitly configured value wins, otherwise the
// APPLICATIONINSIGHTS_CONNECTION_STRING config/environment entry is used.
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    if (connectionString != null) {
        return connectionString;
    }
    // FIX: validate the raw value BEFORE parsing. Previously the null check ran on the
    // parse(...) result, after parse had already been handed a possibly-null string —
    // so the requireNonNull message could never surface. Also avoids shadowing the
    // 'connectionString' field with a local of the same name.
    String rawConnectionString = configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING);
    Objects.requireNonNull(rawConnectionString, "'connectionString' cannot be null");
    return ConnectionString.parse(rawConnectionString);
}
// Returns the user-supplied pipeline verbatim (after rejecting conflicting options),
// or assembles a default pipeline: user agent, cookies, optional AAD bearer auth,
// custom policies, then logging.
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
// A custom pipeline is all-or-nothing: every pipeline-shaping option conflicts.
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
// Client name/version come from the generated properties file; fall back to Unknown*.
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
// AAD auth is optional; without a credential, ingestion relies on the ikey alone.
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
// Logging goes last so it observes the fully-decorated request.
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
// Starts statsbeat reporting. Only invoked when initStatsbeatModule returned non-null,
// i.e. when 'connectionString' is set — so the method reference below cannot NPE.
// FIX: removed the stray @Nullable annotation; it is meaningless on a void method.
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
    statsbeatModule.start(
        builtTelemetryItemExporter,
        this::getStatsbeatConnectionString,
        connectionString::getInstrumentationKey,
        false,
        // Reporting intervals are overridable via config; defaults: 15 min short, 1 day long.
        configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
        configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
        false,
        initStatsbeatFeatures());
}
// Assembles the telemetry exporter, with disk-backed retry when a temp dir exists.
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
    // FIX: use the pipeline assembled by internalBuildAndFreeze() (builtHttpPipeline).
    // The raw 'httpPipeline' field is null unless the caller supplied a custom pipeline,
    // which would leave the TelemetryPipeline without a transport in the default case.
    TelemetryPipeline telemetryPipeline = new TelemetryPipeline(builtHttpPipeline, statsbeatModule);
    File tempDir =
        TempDirs.getApplicationInsightsTempDir(
            LOGGER,
            "Telemetry will not be stored to disk and retried on sporadic network failures");
    if (tempDir != null) {
        // Persist failed batches locally and retry; capacity limit 50 — NOTE(review):
        // confirm the unit (MB vs. file count) against LocalStorageTelemetryPipelineListener.
        return new TelemetryItemExporter(
            telemetryPipeline,
            new LocalStorageTelemetryPipelineListener(
                50,
                TempDirs.getSubDir(tempDir, "telemetry"),
                telemetryPipeline,
                LocalStorageStats.noop(),
                false));
    }
    // No usable temp dir: send best-effort without local persistence.
    return new TelemetryItemExporter(
        telemetryPipeline,
        TelemetryPipelineListener.noop());
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
// Intentionally empty: all configuration happens through the fluent setters.
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
// Fail fast: the builder freezes after the first build*() call, so later changes
// could never reach the already-built exporters.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
// Null policies are rejected eagerly rather than failing inside the pipeline later.
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
// Fail fast once the builder has been frozen by a build*() call.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
// Parse eagerly so an invalid value fails here rather than at export time.
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
    /**
     * Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
     *
     * <p>This registers this exporter as the trace, metric, and log exporter for the SDK being
     * autoconfigured, and replaces the provider's marker exporters with real exporters built from
     * this builder's options the first time each customizer runs.
     *
     * @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
     */
    public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
        // Point all three signals at this exporter's provider key and flag that autoconfigure
        // is being driven through this builder (read back by the provider classes).
        sdkBuilder.addPropertiesSupplier(() -> {
            Map<String, String> props = new HashMap<>();
            props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
            return props;
        });
        // Each customizer swaps the provider's marker exporter for a real one, building the
        // shared pipeline/exporter state on first use (internalBuildAndFreeze is idempotent).
        sdkBuilder.addSpanExporterCustomizer(
            (spanExporter, configProperties) -> {
                if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
                    internalBuildAndFreeze(configProperties);
                    spanExporter = buildTraceExporter(configProperties);
                }
                return spanExporter;
            });
        sdkBuilder.addMetricExporterCustomizer(
            (metricExporter, configProperties) -> {
                if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
                    internalBuildAndFreeze(configProperties);
                    metricExporter = buildMetricExporter(configProperties);
                }
                return metricExporter;
            });
        sdkBuilder.addLogRecordExporterCustomizer(
            (logRecordExporter, configProperties) -> {
                if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
                    internalBuildAndFreeze(configProperties);
                    logRecordExporter = buildLogRecordExporter(configProperties);
                }
                return logRecordExporter;
            });
        // Drop the SDK's own internal metrics (meters "io.opentelemetry.sdk.trace" and
        // "io.opentelemetry.sdk.logs") so they are not exported as user telemetry.
        sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
            sdkMeterProviderBuilder.registerView(
                InstrumentSelector.builder()
                    .setMeterName("io.opentelemetry.sdk.trace")
                    .build(),
                View.builder()
                    .setAggregation(Aggregation.drop())
                    .build()
            ).registerView(
                InstrumentSelector.builder()
                    .setMeterName("io.opentelemetry.sdk.logs")
                    .build(),
                View.builder()
                    .setAggregation(Aggregation.drop())
                    .build()
            ));
    }
    // One-time initialization: builds the HTTP pipeline and telemetry item exporter, starts
    // statsbeat, and freezes the builder so the setters reject further changes. Safe to call
    // from multiple build paths — subsequent calls are no-ops once 'frozen' is set.
    private void internalBuildAndFreeze(ConfigProperties configProperties) {
        if (!frozen) {
            builtHttpPipeline = createHttpPipeline();
            StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
            builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
            // NOTE(review): initStatsbeatModule is not visible in this file — if it can return
            // null, this call needs a null guard; confirm against its declaration.
            startStatsbeatModule(statsbeatModule, configProperties);
            frozen = true;
        }
    }
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
    // No optional statsbeat feature flags are enabled by this exporter.
    private Set<Feature> initStatsbeatFeatures() {
        return Collections.emptySet();
    }
    // Derives the statsbeat connection string from the user's connection string. The meaning of
    // the two null arguments is defined by StatsbeatConnectionString.create — confirm there
    // before changing.
    private StatsbeatConnectionString getStatsbeatConnectionString() {
        return StatsbeatConnectionString.create(connectionString, null, null);
    }
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
    // Builds the span-to-telemetry mapper. Both predicate lambdas always return false; their
    // exact semantics (which events/spans they would select) are defined by SpanDataMapper's
    // constructor — confirm there before changing.
    private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
        return new SpanDataMapper(
            true,
            createDefaultsPopulator(configProperties),
            (event, instrumentationName) -> false,
            (span, event) -> false);
    }
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
    // Starts statsbeat (SDK self-diagnostics) reporting through the already-built telemetry
    // item exporter. Short/long reporting intervals default to 15 minutes and 1 day and can be
    // overridden via the STATSBEAT_*_INTERVAL_SECONDS configuration properties.
    private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
        statsbeatModule.start(
            builtTelemetryItemExporter,
            this::getStatsbeatConnectionString,
            getConnectionString(configProperties)::getInstrumentationKey,
            false,
            configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
            configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
            false,
            initStatsbeatFeatures());
        // NOTE(review): the meaning of the two 'false' flags is not visible from this file —
        // confirm against StatsbeatModule.start before changing.
    }
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} |
same here | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
connectionString::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | connectionString::getInstrumentationKey, | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME ";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME ";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.createForTest(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(DefaultConfigProperties.create(Collections.emptyMap()));
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, config) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(config);
spanExporter = buildTraceExporter(config);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, config) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(config);
metricExporter = buildMetricExporter(config);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, config) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(config);
logRecordExporter = buildLogRecordExporter(config);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
if (statsbeatModule != null) {
startStatsbeatModule(statsbeatModule, configProperties);
}
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
@Nullable
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
StatsbeatModule statsbeatModule = null;
if (connectionString != null) {
statsbeatModule = new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
return statsbeatModule;
}
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
 * Sets the token credential used to authenticate against the ingestion endpoint service.
 *
 * @param credential The Azure Identity TokenCredential.
 * @return The updated {@link AzureMonitorExporterBuilder} object.
 */
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
    if (frozen) {
        // Configuration is immutable once any build method has run.
        IllegalStateException error = new IllegalStateException(
            "credential cannot be changed after any of the build methods have been called");
        throw LOGGER.logExceptionAsError(error);
    }
    this.credential = credential;
    return this;
}
/**
 * Builds an {@link AzureMonitorTraceExporter} from the options configured on this builder. The
 * returned exporter implements the OpenTelemetry {@link SpanExporter} interface.
 *
 * @return An instance of {@link AzureMonitorTraceExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 *     environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public SpanExporter buildTraceExporter() {
    // No auto-configuration in play here, so freeze against an empty default config.
    ConfigProperties config = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(config);
    return buildTraceExporter(config);
}
/**
 * Builds an {@link AzureMonitorMetricExporter} from the options configured on this builder. The
 * returned exporter implements the OpenTelemetry {@link MetricExporter} interface.
 *
 * <p>Creating a new {@link MetricExporter} also starts the {@link HeartbeatExporter}
 * automatically.
 *
 * @return An instance of {@link AzureMonitorMetricExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 *     environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public MetricExporter buildMetricExporter() {
    // No auto-configuration in play here, so freeze against an empty default config.
    ConfigProperties config = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(config);
    return buildMetricExporter(config);
}
/**
 * Builds an {@link AzureMonitorLogRecordExporter} from the options configured on this builder.
 * The returned exporter implements the OpenTelemetry {@link LogRecordExporter} interface.
 *
 * @return An instance of {@link AzureMonitorLogRecordExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 *     environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public LogRecordExporter buildLogRecordExporter() {
    // No auto-configuration in play here, so freeze against an empty default config.
    ConfigProperties config = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(config);
    return buildLogRecordExporter(config);
}
/**
 * Installs the Azure Monitor exporter into an {@link AutoConfiguredOpenTelemetrySdkBuilder},
 * wiring the span, metric and log exporters plus the internal-metric suppression views, based on
 * the options set on this builder.
 *
 * @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
 */
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
    // Route all three signals to this exporter and mark that the builder path is in use.
    sdkBuilder.addPropertiesSupplier(() -> {
        Map<String, String> properties = new HashMap<>();
        properties.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
        properties.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
        properties.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
        properties.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
        return properties;
    });
    // Each customizer swaps the provider's marker exporter for the real one, freezing the
    // shared pipeline/exporter state on first use.
    sdkBuilder.addSpanExporterCustomizer(
        (spanExporter, configProperties) -> {
            if (!(spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter)) {
                return spanExporter;
            }
            internalBuildAndFreeze(configProperties);
            return buildTraceExporter(configProperties);
        });
    sdkBuilder.addMetricExporterCustomizer(
        (metricExporter, configProperties) -> {
            if (!(metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter)) {
                return metricExporter;
            }
            internalBuildAndFreeze(configProperties);
            return buildMetricExporter(configProperties);
        });
    sdkBuilder.addLogRecordExporterCustomizer(
        (logRecordExporter, configProperties) -> {
            if (!(logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter)) {
                return logRecordExporter;
            }
            internalBuildAndFreeze(configProperties);
            return buildLogRecordExporter(configProperties);
        });
    // Drop the SDK's own trace/log instrumentation metrics.
    sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) -> {
        InstrumentSelector traceMeter =
            InstrumentSelector.builder().setMeterName("io.opentelemetry.sdk.trace").build();
        InstrumentSelector logMeter =
            InstrumentSelector.builder().setMeterName("io.opentelemetry.sdk.logs").build();
        return sdkMeterProviderBuilder
            .registerView(traceMeter, View.builder().setAggregation(Aggregation.drop()).build())
            .registerView(logMeter, View.builder().setAggregation(Aggregation.drop()).build());
    });
}
/**
 * Builds the shared HTTP pipeline, statsbeat module and telemetry item exporter exactly once,
 * then freezes the builder so subsequent setter calls are rejected. Safe to call repeatedly;
 * calls after the first are no-ops.
 *
 * @param configProperties the resolved configuration used to initialize statsbeat.
 */
private void internalBuildAndFreeze(ConfigProperties configProperties) {
    if (frozen) {
        return;
    }
    builtHttpPipeline = createHttpPipeline();
    StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
    builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
    startStatsbeatModule(statsbeatModule, configProperties);
    frozen = true;
}
/**
 * Creates the span exporter backed by the already-built telemetry item exporter.
 *
 * @param configProperties the resolved configuration used to populate telemetry defaults.
 * @return a new {@link AzureMonitorTraceExporter}.
 */
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
    SpanDataMapper mapper = createSpanDataMapper(configProperties);
    return new AzureMonitorTraceExporter(mapper, builtTelemetryItemExporter);
}
/**
 * Creates the metric exporter backed by the already-built telemetry item exporter, and starts
 * the heartbeat exporter as a side effect.
 *
 * @param configProperties the resolved configuration used to populate telemetry defaults.
 * @return a new {@link AzureMonitorMetricExporter}.
 */
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
    // Build the defaults populator once and share it between the heartbeat and the metric
    // mapper; the original called createDefaultsPopulator() twice, resolving the connection
    // string and building an identical populator a second time for no benefit.
    BiConsumer<AbstractTelemetryBuilder, Resource> defaultsPopulator =
        createDefaultsPopulator(configProperties);
    HeartbeatExporter.start(
        MINUTES.toSeconds(15), defaultsPopulator, builtTelemetryItemExporter::send);
    return new AzureMonitorMetricExporter(
        new MetricDataMapper(defaultsPopulator, true), builtTelemetryItemExporter);
}
/** Returns the set of statsbeat features to report; no optional features are enabled here. */
private Set<Feature> initStatsbeatFeatures() {
    return Collections.emptySet();
}
/**
 * Derives the statsbeat connection string from this builder's connection string.
 * NOTE(review): the two null arguments are presumably ikey/endpoint overrides that fall back to
 * defaults inside {@code StatsbeatConnectionString.create} — confirm against that factory.
 */
private StatsbeatConnectionString getStatsbeatConnectionString() {
    return StatsbeatConnectionString.create(connectionString, null, null);
}
/**
 * Creates the log record exporter backed by the already-built telemetry item exporter.
 *
 * @param configProperties the resolved configuration used to populate telemetry defaults.
 * @return a new {@link AzureMonitorLogRecordExporter}.
 */
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
    LogDataMapper mapper = new LogDataMapper(true, false, createDefaultsPopulator(configProperties));
    return new AzureMonitorLogRecordExporter(mapper, builtTelemetryItemExporter);
}
/**
 * Creates the mapper that converts OpenTelemetry span data into Azure Monitor telemetry.
 * Both event predicates are constant-false, i.e. no events are filtered or captured specially.
 *
 * @param configProperties the resolved configuration used to populate telemetry defaults.
 * @return a new {@link SpanDataMapper}.
 */
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
    return new SpanDataMapper(
        true,
        createDefaultsPopulator(configProperties),
        (eventData, instrumentationScopeName) -> false,
        (spanData, eventData) -> false);
}
/**
 * Creates a callback that stamps every telemetry item with the connection string, resource,
 * SDK version tag and role name/instance.
 *
 * @param configProperties the resolved configuration, consulted for role name/instance.
 * @return a populator applied to each {@link AbstractTelemetryBuilder} before export.
 */
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
    // Resolve once up front and capture in the lambda; renamed from 'connectionString' so the
    // local no longer shadows the field of the same name.
    ConnectionString resolvedConnectionString = getConnectionString(configProperties);
    return (builder, resource) -> {
        builder.setConnectionString(resolvedConnectionString);
        builder.setResource(resource);
        builder.addTag(
            ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
        ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
    };
}
/**
 * Resolves the connection string: the one set explicitly on this builder wins; otherwise it is
 * parsed from the APPLICATIONINSIGHTS_CONNECTION_STRING configuration property.
 *
 * @param configProperties the resolved configuration, consulted as a fallback.
 * @return the non-null resolved {@link ConnectionString}.
 * @throws NullPointerException if no connection string is available from either source.
 */
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    if (connectionString != null) {
        return connectionString;
    }
    // Renamed from 'connectionString' so the local no longer shadows the field checked above.
    ConnectionString parsed =
        ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
    return Objects.requireNonNull(parsed, "'connectionString' cannot be null");
}
/**
 * Returns the HTTP pipeline to use for ingestion: the caller-supplied pipeline verbatim if one
 * was set (after rejecting any conflicting transport options), otherwise a pipeline assembled
 * from this builder's client, credential, log options and policies.
 *
 * @return the {@link HttpPipeline} used to send telemetry.
 */
private HttpPipeline createHttpPipeline() {
    if (httpPipeline != null) {
        // A custom pipeline is mutually exclusive with every other transport-level option.
        if (credential != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'credential' is not supported when custom 'httpPipeline' is specified"));
        }
        if (httpClient != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'httpClient' is not supported when custom 'httpPipeline' is specified"));
        }
        if (httpLogOptions != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
        }
        if (!httpPipelinePolicies.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
        }
        if (clientOptions != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'clientOptions' is not supported when custom 'httpPipeline' is specified"));
        }
        return httpPipeline;
    }
    String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
    String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(new UserAgentPolicy(
        applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
    policies.add(new CookiePolicy());
    if (credential != null) {
        // Authenticate ingestion requests with AAD when a credential was supplied.
        policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
    }
    policies.addAll(httpPipelinePolicies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new com.azure.core.http.HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .tracer(new NoopTracer())
        .build();
}
/**
 * Creates the statsbeat module used for internal SDK usage telemetry.
 *
 * @param configProperties the resolved configuration (currently unused; kept so the signature
 *     matches the other internal factory methods called from internalBuildAndFreeze).
 * @return a new {@link StatsbeatModule}.
 */
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
    return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
/**
 * Creates the telemetry item exporter, with local-disk persistence (and retry of sporadic
 * network failures) when a temp directory is available, or a no-op listener otherwise.
 *
 * @param statsbeatModule the statsbeat module wired into the telemetry pipeline.
 * @return a new {@link TelemetryItemExporter}.
 */
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
    // Use the pipeline assembled in internalBuildAndFreeze(): the raw 'httpPipeline' field is
    // null unless the caller supplied a custom pipeline, whereas 'builtHttpPipeline' is always
    // set by the time this method runs (and equals 'httpPipeline' when one was supplied).
    // Previously the built pipeline was assigned but never used.
    TelemetryPipeline telemetryPipeline = new TelemetryPipeline(builtHttpPipeline, statsbeatModule);
    File tempDir =
        TempDirs.getApplicationInsightsTempDir(
            LOGGER,
            "Telemetry will not be stored to disk and retried on sporadic network failures");
    if (tempDir == null) {
        return new TelemetryItemExporter(telemetryPipeline, TelemetryPipelineListener.noop());
    }
    return new TelemetryItemExporter(
        telemetryPipeline,
        new LocalStorageTelemetryPipelineListener(
            50,                                   // max disk usage in MB for buffered telemetry
            TempDirs.getSubDir(tempDir, "telemetry"),
            telemetryPipeline,
            LocalStorageStats.noop(),
            false));
}
} |
can these be renamed back to `configProperties` (or did you prefer `config`?) | public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, config) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(config);
spanExporter = buildTraceExporter(config);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, config) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(config);
metricExporter = buildMetricExporter(config);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, config) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(config);
logRecordExporter = buildLogRecordExporter(config);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
} | (spanExporter, config) -> { | public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME ";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME ";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.createForTest(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(DefaultConfigProperties.create(Collections.emptyMap()));
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
if (statsbeatModule != null) {
startStatsbeatModule(statsbeatModule, configProperties);
}
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
@Nullable
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
StatsbeatModule statsbeatModule = null;
if (connectionString != null) {
statsbeatModule = new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
return statsbeatModule;
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
connectionString::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
 * Supplies the Azure Monitor connection string used for telemetry ingestion.
 * The string is parsed eagerly so an invalid value fails at configuration time.
 *
 * @param connectionString The connection string for the Azure Monitor resource.
 * @return The updated {@link AzureMonitorExporterBuilder} object.
 * @throws NullPointerException If the connection string is {@code null}.
 * @throws IllegalArgumentException If the connection string is invalid.
 */
public AzureMonitorExporterBuilder connectionString(String connectionString) {
    if (!frozen) {
        this.connectionString = ConnectionString.parse(connectionString);
        return this;
    }
    throw LOGGER.logExceptionAsError(new IllegalStateException(
        "connectionString cannot be changed after any of the build methods have been called"));
}
/**
 * Selects the Azure Monitor service API version to target.
 *
 * @param serviceVersion The Azure Monitor service version.
 * @return The update {@link AzureMonitorExporterBuilder} object.
 */
public AzureMonitorExporterBuilder serviceVersion(
    AzureMonitorExporterServiceVersion serviceVersion) {
    if (!frozen) {
        this.serviceVersion = serviceVersion;
        return this;
    }
    throw LOGGER.logExceptionAsError(new IllegalStateException(
        "serviceVersion cannot be changed after any of the build methods have been called"));
}
/**
 * Supplies the token credential used to authenticate against the ingestion endpoint.
 *
 * @param credential The Azure Identity TokenCredential.
 * @return The updated {@link AzureMonitorExporterBuilder} object.
 */
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
    if (!frozen) {
        this.credential = credential;
        return this;
    }
    throw LOGGER.logExceptionAsError(new IllegalStateException(
        "credential cannot be changed after any of the build methods have been called"));
}
/**
 * Builds an {@link AzureMonitorTraceExporter} (an OpenTelemetry {@link SpanExporter})
 * from the options configured on this builder, freezing the builder in the process.
 *
 * @return An instance of {@link AzureMonitorTraceExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public SpanExporter buildTraceExporter() {
    // Freeze with an empty (all-defaults) configuration, then build against that same config.
    ConfigProperties config = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(config);
    return buildTraceExporter(config);
}
/**
 * Builds an {@link AzureMonitorMetricExporter} (an OpenTelemetry {@link MetricExporter})
 * from the options configured on this builder, freezing the builder in the process.
 *
 * <p>Creating a {@link MetricExporter} also automatically starts the
 * {@link HeartbeatExporter}.
 *
 * @return An instance of {@link AzureMonitorMetricExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public MetricExporter buildMetricExporter() {
    // Freeze with an empty (all-defaults) configuration, then build against that same config.
    ConfigProperties config = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(config);
    return buildMetricExporter(config);
}
/**
 * Builds an {@link AzureMonitorLogRecordExporter} (an OpenTelemetry
 * {@link LogRecordExporter}) from the options configured on this builder, freezing the
 * builder in the process.
 *
 * @return An instance of {@link AzureMonitorLogRecordExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public LogRecordExporter buildLogRecordExporter() {
    // Freeze with an empty (all-defaults) configuration, then build against that same config.
    ConfigProperties config = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(config);
    return buildLogRecordExporter(config);
}
/**
 * Builds the shared HTTP pipeline, statsbeat module, and telemetry item exporter exactly
 * once, then freezes the builder so subsequent setter calls throw.
 *
 * <p>Idempotent: after the first call this is a no-op, so every {@code build*Exporter}
 * entry point can invoke it unconditionally.
 *
 * <p>(The previous javadoc described a nonexistent {@code sdkBuilder} parameter; corrected.)
 *
 * @param configProperties the resolved OpenTelemetry configuration used to initialize
 *     and start the statsbeat module.
 */
private void internalBuildAndFreeze(ConfigProperties configProperties) {
    if (!frozen) {
        builtHttpPipeline = createHttpPipeline();
        StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
        builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
        startStatsbeatModule(statsbeatModule, configProperties);
        frozen = true;
    }
}
// Maps OpenTelemetry spans to Application Insights telemetry and hands them to the
// shared (already-built) telemetry item exporter.
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
    SpanDataMapper mapper = createSpanDataMapper(configProperties);
    return new AzureMonitorTraceExporter(mapper, builtTelemetryItemExporter);
}
// Builds the metric exporter; as a side effect, starts the heartbeat emitted every 15 minutes.
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
    // Build the defaults populator once and share it between the heartbeat and the mapper.
    // The original called createDefaultsPopulator twice, re-resolving the connection string
    // a second time for no benefit.
    BiConsumer<AbstractTelemetryBuilder, Resource> defaultsPopulator =
        createDefaultsPopulator(configProperties);
    HeartbeatExporter.start(
        MINUTES.toSeconds(15), defaultsPopulator, builtTelemetryItemExporter::send);
    return new AzureMonitorMetricExporter(
        new MetricDataMapper(defaultsPopulator, true), builtTelemetryItemExporter);
}
// Statsbeat feature flags reported by this exporter; none are currently enabled.
private Set<Feature> initStatsbeatFeatures() {
    return Collections.emptySet();
}
// Derives the statsbeat (SDK-internal telemetry) connection string from the customer's
// connection string; the two null arguments are instrumentation-key/endpoint overrides
// that are not used here.
private StatsbeatConnectionString getStatsbeatConnectionString() {
    return StatsbeatConnectionString.create(connectionString, null, null);
}
// Maps OpenTelemetry log records to Application Insights telemetry and hands them to the
// shared (already-built) telemetry item exporter.
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
    LogDataMapper logDataMapper =
        new LogDataMapper(true, false, createDefaultsPopulator(configProperties));
    return new AzureMonitorLogRecordExporter(logDataMapper, builtTelemetryItemExporter);
}
// Builds the span-to-telemetry mapper used by the trace exporter.
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
    return new SpanDataMapper(
        true,
        createDefaultsPopulator(configProperties),
        // Both callbacks are hard-wired to false, i.e. they never flag an event or span.
        // NOTE(review): confirm these no-op predicates match the intended mapper behavior.
        (event, instrumentationName) -> false,
        (span, event) -> false);
}
/**
 * Returns a populator that stamps builder-wide defaults on every telemetry item:
 * the resolved connection string, the OpenTelemetry resource, the SDK version tag,
 * and the role name/instance derived from the resource.
 */
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
    // Resolve the connection string once, outside the lambda, so every invocation reuses
    // the same instance instead of re-reading configuration.
    ConnectionString connectionString = getConnectionString(configProperties);
    return (builder, resource) -> {
        builder.setConnectionString(connectionString);
        builder.setResource(resource);
        builder.addTag(
            ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
        ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
    };
}
/**
 * Resolves the effective connection string: an explicitly configured value wins;
 * otherwise it is read from the "APPLICATIONINSIGHTS_CONNECTION_STRING" configuration
 * property.
 *
 * @throws NullPointerException if no connection string is available from either source.
 */
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    if (connectionString != null) {
        return connectionString;
    }
    // Renamed the local (previously also 'connectionString') so it no longer shadows the
    // field checked above.
    ConnectionString parsed =
        ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
    return Objects.requireNonNull(parsed, "'connectionString' cannot be null");
}
// Assembles the HTTP pipeline used to ship telemetry to the ingestion endpoint.
// If a custom pipeline was supplied it is used verbatim, and every other HTTP-related
// setting must be unset — they could not be honored on a pre-built pipeline.
private HttpPipeline createHttpPipeline() {
    if (httpPipeline != null) {
        if (credential != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'credential' is not supported when custom 'httpPipeline' is specified"));
        }
        if (httpClient != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'httpClient' is not supported when custom 'httpPipeline' is specified"));
        }
        if (httpLogOptions != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
        }
        if (!httpPipelinePolicies.isEmpty()) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
        }
        if (clientOptions != null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "'clientOptions' is not supported when custom 'httpPipeline' is specified"));
        }
        return httpPipeline;
    }
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    // Client name/version come from the generated properties file; placeholders otherwise.
    String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
    String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
    policies.add(new CookiePolicy());
    // Bearer-token auth is only attached when a credential was configured.
    if (credential != null) {
        policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
    }
    // User-supplied policies run after the required ones; logging goes last.
    policies.addAll(httpPipelinePolicies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new com.azure.core.http.HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .tracer(new NoopTracer())
        .build();
}
// Creates the statsbeat module. 'configProperties' is accepted for signature symmetry
// with the other init/start helpers but is currently unused.
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
    return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
// Starts the statsbeat (SDK-internal usage telemetry) module. Interval overrides are read
// from configuration, defaulting to 15 minutes (short) and 1 day (long).
// NOTE(review): the meanings of the two 'false' flags are not visible from this file —
// confirm against StatsbeatModule.start's signature.
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
    statsbeatModule.start(
        builtTelemetryItemExporter,
        this::getStatsbeatConnectionString,
        getConnectionString(configProperties)::getInstrumentationKey,
        false,
        configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
        configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
        false,
        initStatsbeatFeatures());
}
/**
 * Creates the shared {@link TelemetryItemExporter}. When a writable temp directory is
 * available, failed sends are persisted to disk and retried; otherwise telemetry is sent
 * without local-storage backing.
 *
 * <p>Fix: the pipeline handed to {@link TelemetryPipeline} is now {@code builtHttpPipeline}
 * (assembled by {@code internalBuildAndFreeze()} just before this method runs), not the raw
 * {@code httpPipeline} field, which is {@code null} unless the caller supplied a custom
 * pipeline. Previously {@code builtHttpPipeline} was assigned but never used, and a null
 * pipeline could be passed here in the default configuration.
 */
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
    TelemetryPipeline telemetryPipeline = new TelemetryPipeline(builtHttpPipeline, statsbeatModule);
    File tempDir =
        TempDirs.getApplicationInsightsTempDir(
            LOGGER,
            "Telemetry will not be stored to disk and retried on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        telemetryItemExporter =
            new TelemetryItemExporter(
                telemetryPipeline,
                new LocalStorageTelemetryPipelineListener(
                    50, // local-storage capacity limit — TODO confirm unit (MB?)
                    TempDirs.getSubDir(tempDir, "telemetry"),
                    telemetryPipeline,
                    LocalStorageStats.noop(),
                    false));
    } else {
        telemetryItemExporter = new TelemetryItemExporter(
            telemetryPipeline,
            TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
} |
(doesn't have to be done in this PR) ```suggestion return buildLogRecordExporter(defaultConfig); ``` | public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(DefaultConfigProperties.create(Collections.emptyMap()));
} | return buildLogRecordExporter(DefaultConfigProperties.create(Collections.emptyMap())); | public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
 * Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
 * exporter is an implementation of OpenTelemetry {@link MetricExporter}.
 *
 * <p>When a new {@link MetricExporter} is created, it will automatically start {@link
 * HeartbeatExporter}.
 *
 * @return An instance of {@link AzureMonitorMetricExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public MetricExporter buildMetricExporter() {
    // Freeze against an empty configuration and hand the same instance to the overload.
    ConfigProperties emptyConfig = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(emptyConfig);
    return buildMetricExporter(emptyConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
// Select this exporter for all three signals and flag that the autoconfigure SPI is being
// driven by this builder, so the providers install marker exporters we can swap out below.
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
// Replace each provider-installed marker exporter with a real Azure Monitor exporter.
// internalBuildAndFreeze is idempotent, so the first customizer to run freezes the builder
// with the SDK's effective configuration and the others reuse the shared state.
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
// Drop the OpenTelemetry SDK's internal trace/log meters so the exporter does not report
// metrics about itself.
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
// One-shot initialization shared by every build path: wire the HTTP pipeline, the statsbeat
// (self-diagnostics) module and the telemetry item exporter, then freeze the builder so all
// subsequent setter calls throw IllegalStateException. Safe to call repeatedly.
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
// Statsbeat is started only after the exporter it reports through exists.
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
    // Build the span->telemetry mapper first; the exporter simply forwards mapped items
    // through the shared telemetry item exporter.
    SpanDataMapper mapper = createSpanDataMapper(configProperties);
    return new AzureMonitorTraceExporter(mapper, builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
    // Hoisted: the original called createDefaultsPopulator twice (once for the heartbeat,
    // once for the mapper); the populator is stateless, so a single instance suffices and
    // avoids resolving the connection string twice.
    BiConsumer<AbstractTelemetryBuilder, Resource> defaultsPopulator =
        createDefaultsPopulator(configProperties);
    // Building a metric exporter implicitly starts the 15-minute heartbeat.
    HeartbeatExporter.start(
        MINUTES.toSeconds(15), defaultsPopulator, builtTelemetryItemExporter::send);
    return new AzureMonitorMetricExporter(
        new MetricDataMapper(defaultsPopulator, true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
    // No optional statsbeat features are enabled by this exporter.
    Set<Feature> features = Collections.emptySet();
    return features;
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
    // Derive the statsbeat endpoint from the user's connection string; no ikey/endpoint
    // overrides are supplied.
    return StatsbeatConnectionString.create(this.connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
    // Assemble the log->telemetry mapper, then wrap it with the shared item exporter.
    LogDataMapper mapper =
        new LogDataMapper(true, false, createDefaultsPopulator(configProperties));
    return new AzureMonitorLogRecordExporter(mapper, builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
// Maps OpenTelemetry span data to Azure Monitor telemetry items. The two lambdas always
// return false, i.e. no event-based filtering/promotion is applied.
// NOTE(review): the leading boolean argument's meaning is not visible in this file -
// confirm against SpanDataMapper's constructor.
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
// Resolve the connection string once, eagerly (throws if none is configured); the returned
// lambda then stamps every telemetry item with the connection string, its resource, the SDK
// version tag, and the role name/instance derived from the resource.
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
// Tag each item with this exporter's SDK version for diagnostics.
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    // An explicitly configured connection string wins over the config/environment lookup.
    if (connectionString != null) {
        return connectionString;
    }
    // Renamed from 'connectionString': the original local shadowed the field of the same
    // name, making the null-check above easy to misread.
    ConnectionString parsed =
        ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
    // Never returns null: a missing value fails fast here with NullPointerException.
    return Objects.requireNonNull(parsed, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
// A user-supplied pipeline replaces the built-in one entirely; every other HTTP-related
// option would be silently ignored, so each is rejected loudly instead.
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
// Otherwise assemble the default pipeline: user agent + cookie handling first, optional
// AAD bearer-token auth, then any user-registered policies, with logging last.
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
// AAD authentication against the ingestion endpoint.
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
    // configProperties is currently unused; the parameter is kept for signature symmetry
    // with the other init/start helpers.
    StatsbeatModule module = new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
    return module;
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
// Starts periodic statsbeat (SDK self-diagnostics) reporting through the already-built
// telemetry item exporter.
// getConnectionString(...) cannot return null (it ends in Objects.requireNonNull), so the
// bound method reference below is NPE-safe; note it is evaluated eagerly and will throw
// here if no connection string was configured.
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
// Short/long reporting intervals default to 15 minutes and 1 day respectively.
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
// NOTE(review): the meaning of the two 'false' flags is not visible here - confirm
// against StatsbeatModule.start's signature.
}
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
    // Fix: use the pipeline assembled by internalBuildAndFreeze(). The original passed the
    // 'httpPipeline' field, which is null whenever the caller did not supply a custom
    // pipeline, while 'builtHttpPipeline' was assigned but never read.
    TelemetryPipeline telemetryPipeline = new TelemetryPipeline(builtHttpPipeline, statsbeatModule);
    File tempDir =
        TempDirs.getApplicationInsightsTempDir(
            LOGGER,
            "Telemetry will not be stored to disk and retried on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        // Disk-backed listener persists failed sends and retries them later.
        // NOTE(review): '50' is presumably the local-storage capacity limit - confirm the
        // units against LocalStorageTelemetryPipelineListener.
        telemetryItemExporter =
            new TelemetryItemExporter(
                telemetryPipeline,
                new LocalStorageTelemetryPipelineListener(
                    50,
                    TempDirs.getSubDir(tempDir, "telemetry"),
                    telemetryPipeline,
                    LocalStorageStats.noop(),
                    false));
    } else {
        // No writable temp dir: telemetry that fails to send is dropped.
        telemetryItemExporter = new TelemetryItemExporter(
            telemetryPipeline,
            TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
} |
Can `getConnectionString(configProperties)` return null? No — it ends in `Objects.requireNonNull`, so the bound method reference is null-safe; however it is evaluated eagerly and throws if no connection string is configured. | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | getConnectionString(configProperties)::getInstrumentationKey, | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
 * Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
 * exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
 *
 * @return An instance of {@link AzureMonitorLogRecordExporter}.
 * @throws NullPointerException if the connection string is not set on this builder or if the
 * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
 */
public LogRecordExporter buildLogRecordExporter() {
    ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
    internalBuildAndFreeze(defaultConfig);
    // Fix: reuse the config instance that froze the builder. The original created a second
    // empty ConfigProperties here, leaving 'defaultConfig' half-used and diverging from
    // buildTraceExporter()/buildMetricExporter().
    return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    // An explicitly configured connection string wins over the config/environment lookup.
    if (connectionString != null) {
        return connectionString;
    }
    // Renamed from 'connectionString': the original local shadowed the field of the same
    // name, making the null-check above easy to misread.
    ConnectionString parsed =
        ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
    // Never returns null: a missing value fails fast here with NullPointerException.
    return Objects.requireNonNull(parsed, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
    // Fix: use the pipeline assembled by internalBuildAndFreeze(). The original passed the
    // 'httpPipeline' field, which is null whenever the caller did not supply a custom
    // pipeline, while 'builtHttpPipeline' was assigned but never read.
    TelemetryPipeline telemetryPipeline = new TelemetryPipeline(builtHttpPipeline, statsbeatModule);
    File tempDir =
        TempDirs.getApplicationInsightsTempDir(
            LOGGER,
            "Telemetry will not be stored to disk and retried on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        // Disk-backed listener persists failed sends and retries them later.
        // NOTE(review): '50' is presumably the local-storage capacity limit - confirm the
        // units against LocalStorageTelemetryPipelineListener.
        telemetryItemExporter =
            new TelemetryItemExporter(
                telemetryPipeline,
                new LocalStorageTelemetryPipelineListener(
                    50,
                    TempDirs.getSubDir(tempDir, "telemetry"),
                    telemetryPipeline,
                    LocalStorageStats.noop(),
                    false));
    } else {
        // No writable temp dir: telemetry that fails to send is dropped.
        telemetryItemExporter = new TelemetryItemExporter(
            telemetryPipeline,
            TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
    /**
     * Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
     *
     * @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
     */
    public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
        // Select this exporter for all three signals; the marker flag tells the providers
        // that the exporter instances come from this builder, not from autoconfiguration.
        sdkBuilder.addPropertiesSupplier(() -> {
            Map<String, String> props = new HashMap<>();
            props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
            return props;
        });
        // Each customizer swaps the provider's marker exporter for the real Azure Monitor
        // exporter, building the shared pipeline/exporter state on first use (idempotent
        // via internalBuildAndFreeze's 'frozen' flag).
        sdkBuilder.addSpanExporterCustomizer(
            (spanExporter, configProperties) -> {
                if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
                    internalBuildAndFreeze(configProperties);
                    spanExporter = buildTraceExporter(configProperties);
                }
                return spanExporter;
            });
        sdkBuilder.addMetricExporterCustomizer(
            (metricExporter, configProperties) -> {
                if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
                    internalBuildAndFreeze(configProperties);
                    metricExporter = buildMetricExporter(configProperties);
                }
                return metricExporter;
            });
        sdkBuilder.addLogRecordExporterCustomizer(
            (logRecordExporter, configProperties) -> {
                if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
                    internalBuildAndFreeze(configProperties);
                    logRecordExporter = buildLogRecordExporter(configProperties);
                }
                return logRecordExporter;
            });
        // Drop the OTel SDK's own internal trace/log instrumentation metrics so they are
        // not exported as telemetry.
        sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
            sdkMeterProviderBuilder.registerView(
                InstrumentSelector.builder()
                    .setMeterName("io.opentelemetry.sdk.trace")
                    .build(),
                View.builder()
                    .setAggregation(Aggregation.drop())
                    .build()
            ).registerView(
                InstrumentSelector.builder()
                    .setMeterName("io.opentelemetry.sdk.logs")
                    .build(),
                View.builder()
                    .setAggregation(Aggregation.drop())
                    .build()
            ));
    }
    // One-time initialization of the shared state (HTTP pipeline, telemetry item exporter,
    // statsbeat module). After the first call, 'frozen' is set and all builder setters throw.
    private void internalBuildAndFreeze(ConfigProperties configProperties) {
        if (!frozen) {
            // Order matters: the pipeline must exist before the telemetry exporter is
            // created, and the exporter before statsbeat starts reporting through it.
            builtHttpPipeline = createHttpPipeline();
            StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
            builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
            startStatsbeatModule(statsbeatModule, configProperties);
            frozen = true;
        }
    }
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
    // Statsbeat feature flags reported by this exporter; none are enabled in this distribution.
    private Set<Feature> initStatsbeatFeatures() {
        return Collections.emptySet();
    }
    // Derives the statsbeat connection string from the explicitly configured one.
    // NOTE(review): reads the 'connectionString' field directly, which is null when the
    // connection string comes only from the environment/config — confirm that
    // StatsbeatConnectionString.create handles null. The two trailing null arguments'
    // semantics are not visible here; verify against StatsbeatConnectionString.create.
    private StatsbeatConnectionString getStatsbeatConnectionString() {
        return StatsbeatConnectionString.create(connectionString, null, null);
    }
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
    // Builds the mapper that converts OpenTelemetry span data to Azure Monitor telemetry.
    // The two lambda arguments are always-false predicates — presumably opting out of
    // event/exception-based special handling; confirm against SpanDataMapper's constructor.
    private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
        return new SpanDataMapper(
            true,
            createDefaultsPopulator(configProperties),
            (event, instrumentationName) -> false,
            (span, event) -> false);
    }
    // Returns a callback that stamps every telemetry builder with the resolved connection
    // string, the OTel resource, the SDK version tag, and role name/instance.
    private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
        // Resolve the connection string eagerly so a missing value fails here rather than
        // on the first telemetry item.
        ConnectionString connectionString = getConnectionString(configProperties);
        return (builder, resource) -> {
            builder.setConnectionString(connectionString);
            builder.setResource(resource);
            builder.addTag(
                ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
            ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
        };
    }
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
    // Creates the statsbeat (internal SDK usage telemetry) module. The configProperties
    // parameter is currently unused but kept for signature symmetry with the other init methods.
    private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
        return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
    }
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} |
No, it will never be null; it will throw an exception if it is. | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | getConnectionString(configProperties)::getInstrumentationKey, | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(DefaultConfigProperties.create(Collections.emptyMap()));
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
    public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
        // Register this exporter for all three signals and flag that the azure-monitor
        // exporter builder is driving the autoconfiguration.
        sdkBuilder.addPropertiesSupplier(() -> {
            Map<String, String> props = new HashMap<>();
            props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
            props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
            return props;
        });
        // Each customizer swaps the provider's placeholder ("marker") exporter for a real
        // one, freezing this builder with the resolved config on first use.
        sdkBuilder.addSpanExporterCustomizer(
            (spanExporter, configProperties) -> {
                if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
                    internalBuildAndFreeze(configProperties);
                    spanExporter = buildTraceExporter(configProperties);
                }
                return spanExporter;
            });
        sdkBuilder.addMetricExporterCustomizer(
            (metricExporter, configProperties) -> {
                if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
                    internalBuildAndFreeze(configProperties);
                    metricExporter = buildMetricExporter(configProperties);
                }
                return metricExporter;
            });
        sdkBuilder.addLogRecordExporterCustomizer(
            (logRecordExporter, configProperties) -> {
                if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
                    internalBuildAndFreeze(configProperties);
                    logRecordExporter = buildLogRecordExporter(configProperties);
                }
                return logRecordExporter;
            });
        // Drop the OpenTelemetry SDK's own trace/log instrumentation metrics
        // (meters "io.opentelemetry.sdk.trace" and "io.opentelemetry.sdk.logs").
        sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
            sdkMeterProviderBuilder.registerView(
                InstrumentSelector.builder()
                    .setMeterName("io.opentelemetry.sdk.trace")
                    .build(),
                View.builder()
                    .setAggregation(Aggregation.drop())
                    .build()
            ).registerView(
                InstrumentSelector.builder()
                    .setMeterName("io.opentelemetry.sdk.logs")
                    .build(),
                View.builder()
                    .setAggregation(Aggregation.drop())
                    .build()
            ));
    }
    // Performs the one-time build of the HTTP pipeline, statsbeat module and telemetry item
    // exporter, then freezes the builder (the setters throw once 'frozen' is true).
    // Idempotent: calls after the first are no-ops.
    private void internalBuildAndFreeze(ConfigProperties configProperties) {
        if (!frozen) {
            builtHttpPipeline = createHttpPipeline();
            StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
            builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
            startStatsbeatModule(statsbeatModule, configProperties);
            frozen = true;
        }
    }
    // Creates the span exporter backed by the already-built (frozen) telemetry item exporter.
    private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
        return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
    }
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
    // Statsbeat feature flags reported by this exporter; none are enabled here.
    private Set<Feature> initStatsbeatFeatures() {
        return Collections.emptySet();
    }
    // Derives the statsbeat connection string from the user-supplied connection string.
    // NOTE(review): the 'connectionString' field is null when only the environment variable
    // supplies the value — confirm StatsbeatConnectionString.create tolerates null input.
    private StatsbeatConnectionString getStatsbeatConnectionString() {
        return StatsbeatConnectionString.create(connectionString, null, null);
    }
    // Creates the log exporter backed by the already-built (frozen) telemetry item exporter.
    // The boolean flags passed to LogDataMapper are opaque here — presumably capture options;
    // TODO confirm against LogDataMapper's constructor documentation.
    private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
        return new AzureMonitorLogRecordExporter(
            new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
    }
    // Maps OpenTelemetry span data to Azure Monitor telemetry. Both predicate lambdas
    // always return false — presumably disabling the corresponding event/span capture
    // hooks; confirm against SpanDataMapper's constructor contract.
    private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
        return new SpanDataMapper(
            true,
            createDefaultsPopulator(configProperties),
            (event, instrumentationName) -> false,
            (span, event) -> false);
    }
    // Returns a populator applied to every telemetry builder: sets the connection string,
    // the OpenTelemetry resource, the internal SDK-version tag, and the role
    // name/instance derived from the resource.
    private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
        // Resolve once up front so every telemetry item reuses the same connection string.
        ConnectionString connectionString = getConnectionString(configProperties);
        return (builder, resource) -> {
            builder.setConnectionString(connectionString);
            builder.setResource(resource);
            builder.addTag(
                ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
            ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
        };
    }
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
    // Returns the HTTP pipeline used to ship telemetry. A user-supplied custom pipeline is
    // returned verbatim, but only if no other HTTP-related option was configured — those
    // options cannot be applied to a prebuilt pipeline, so they are rejected loudly rather
    // than silently ignored.
    private HttpPipeline createHttpPipeline() {
        if (httpPipeline != null) {
            if (credential != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'credential' is not supported when custom 'httpPipeline' is specified"));
            }
            if (httpClient != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'httpClient' is not supported when custom 'httpPipeline' is specified"));
            }
            if (httpLogOptions != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
            }
            if (!httpPipelinePolicies.isEmpty()) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
            }
            if (clientOptions != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'clientOptions' is not supported when custom 'httpPipeline' is specified"));
            }
            return httpPipeline;
        }
        // Default pipeline: user agent + cookie policies, optional AAD bearer-token auth,
        // any user-registered policies, and HTTP logging last.
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
        String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
        String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
        policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
        policies.add(new CookiePolicy());
        if (credential != null) {
            // Authenticate against the Application Insights ingestion scope.
            policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
        }
        policies.addAll(httpPipelinePolicies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new com.azure.core.http.HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .tracer(new NoopTracer())
            .build();
    }
    // Creates the statsbeat module. 'configProperties' is currently unused here; the module
    // only needs the VM/RP-integration updater at construction time.
    private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
        return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
    }
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} |
That is what SpotBugs flags after I changed it from an instance variable to a getter method. | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | getConnectionString(configProperties)::getInstrumentationKey, | private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(DefaultConfigProperties.create(Collections.emptyMap()));
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
 * Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
 *
 * @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
 */
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
    // Route all three signals (traces, metrics, logs) to the Azure Monitor exporter providers
    // and mark that this builder (not plain autoconfigure discovery) drives the installation.
    sdkBuilder.addPropertiesSupplier(() -> {
        Map<String, String> props = new HashMap<>();
        props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
        props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
        props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
        props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
        return props;
    });
    // Each customizer replaces the provider's marker exporter with a real exporter, freezing
    // the shared HTTP pipeline / telemetry item exporter state on first use.
    sdkBuilder.addSpanExporterCustomizer(
        (spanExporter, configProperties) -> {
            if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
                internalBuildAndFreeze(configProperties);
                spanExporter = buildTraceExporter(configProperties);
            }
            return spanExporter;
        });
    sdkBuilder.addMetricExporterCustomizer(
        (metricExporter, configProperties) -> {
            if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
                internalBuildAndFreeze(configProperties);
                metricExporter = buildMetricExporter(configProperties);
            }
            return metricExporter;
        });
    sdkBuilder.addLogRecordExporterCustomizer(
        (logRecordExporter, configProperties) -> {
            if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
                internalBuildAndFreeze(configProperties);
                logRecordExporter = buildLogRecordExporter(configProperties);
            }
            return logRecordExporter;
        });
    // Drop metrics emitted by the SDK's own trace/log meters ("io.opentelemetry.sdk.trace",
    // "io.opentelemetry.sdk.logs") — presumably to suppress self-telemetry noise.
    sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
        sdkMeterProviderBuilder.registerView(
            InstrumentSelector.builder()
                .setMeterName("io.opentelemetry.sdk.trace")
                .build(),
            View.builder()
                .setAggregation(Aggregation.drop())
                .build()
        ).registerView(
            InstrumentSelector.builder()
                .setMeterName("io.opentelemetry.sdk.logs")
                .build(),
            View.builder()
                .setAggregation(Aggregation.drop())
                .build()
        ));
}
// One-shot initialization: the first exporter built wins, later calls are no-ops, so all
// exporters share the same HTTP pipeline, statsbeat module, and telemetry item exporter.
// NOTE(review): not synchronized — concurrent first calls could double-initialize; confirm
// a single-threaded build phase is assumed.
private void internalBuildAndFreeze(ConfigProperties configProperties) {
    if (!frozen) {
        builtHttpPipeline = createHttpPipeline();
        StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
        builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
        startStatsbeatModule(statsbeatModule, configProperties);
        frozen = true;
    }
}
// Builds the span exporter against the already-frozen shared telemetry item exporter.
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
    SpanDataMapper spanDataMapper = createSpanDataMapper(configProperties);
    return new AzureMonitorTraceExporter(spanDataMapper, builtTelemetryItemExporter);
}
// Builds the metric exporter and starts the heartbeat.
// Improvement: createDefaultsPopulator(...) was invoked twice (once for the heartbeat, once
// for the mapper), resolving the connection string twice; build it once and reuse it.
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
    BiConsumer<AbstractTelemetryBuilder, Resource> defaultsPopulator =
        createDefaultsPopulator(configProperties);
    // Heartbeat fires every 15 minutes through the shared telemetry item exporter.
    HeartbeatExporter.start(
        MINUTES.toSeconds(15), defaultsPopulator, builtTelemetryItemExporter::send);
    return new AzureMonitorMetricExporter(
        new MetricDataMapper(defaultsPopulator, true), builtTelemetryItemExporter);
}
// No optional statsbeat features are enabled by this builder.
private Set<Feature> initStatsbeatFeatures() {
    return Collections.emptySet();
}
// Derives the statsbeat connection string from the user's connection string; no
// instrumentation-key or endpoint overrides are supplied here.
private StatsbeatConnectionString getStatsbeatConnectionString() {
    return StatsbeatConnectionString.create(connectionString, null, null);
}
// Mirrors the trace/metric paths: map log data, then hand off to the shared item exporter.
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
    LogDataMapper logDataMapper =
        new LogDataMapper(true, false, createDefaultsPopulator(configProperties));
    return new AzureMonitorLogRecordExporter(logDataMapper, builtTelemetryItemExporter);
}
// Creates the span-to-telemetry mapper used by the trace exporter.
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
    return new SpanDataMapper(
        true,
        createDefaultsPopulator(configProperties),
        // Both predicates always return false: nothing is filtered by this builder.
        (event, instrumentationName) -> false,
        (span, event) -> false);
}
// Returns a consumer applied to every outgoing telemetry item to stamp common attributes
// (connection string, resource, SDK version tag, role name/instance).
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
    // Resolve the connection string once, up front, rather than per telemetry item.
    ConnectionString connectionString = getConnectionString(configProperties);
    return (builder, resource) -> {
        builder.setConnectionString(connectionString);
        builder.setResource(resource);
        // Tag each item with the SDK version for diagnostics.
        builder.addTag(
            ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
        ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
    };
}
/**
 * Resolves the connection string: the value set explicitly on this builder wins, otherwise
 * the APPLICATIONINSIGHTS_CONNECTION_STRING configuration/environment value is parsed.
 *
 * @throws NullPointerException if no connection string can be resolved.
 */
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    if (connectionString != null) {
        return connectionString;
    }
    // Renamed from "connectionString" to stop shadowing the field of the same name.
    ConnectionString parsedConnectionString =
        ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
    return Objects.requireNonNull(parsedConnectionString, "'connectionString' cannot be null");
}
/**
 * Returns the caller-supplied {@code httpPipeline} (after validating that no conflicting HTTP
 * options were configured) or builds a new pipeline from the individual options.
 *
 * @throws IllegalStateException if a custom pipeline is combined with any other HTTP option.
 */
private HttpPipeline createHttpPipeline() {
    if (httpPipeline != null) {
        // A caller-supplied pipeline is mutually exclusive with every other HTTP option.
        requireUnset(credential != null, "credential");
        requireUnset(httpClient != null, "httpClient");
        requireUnset(httpLogOptions != null, "httpLogOptions");
        requireUnset(!httpPipelinePolicies.isEmpty(), "httpPipelinePolicies");
        requireUnset(clientOptions != null, "clientOptions");
        return httpPipeline;
    }
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
    String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
    policies.add(new CookiePolicy());
    if (credential != null) {
        // Authenticate with AAD when a token credential was provided.
        policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
    }
    policies.addAll(httpPipelinePolicies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new com.azure.core.http.HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .tracer(new NoopTracer())
        .build();
}

/**
 * Throws when an HTTP-related option was configured alongside a custom {@code httpPipeline}.
 * Replaces five copy-pasted throw blocks with one helper; messages are byte-identical.
 *
 * @param isSet whether the conflicting option was configured
 * @param optionName the option name used in the error message
 */
private void requireUnset(boolean isSet, String optionName) {
    if (isSet) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "'" + optionName + "' is not supported when custom 'httpPipeline' is specified"));
    }
}
// Creates the statsbeat (SDK self-telemetry) module.
// NOTE(review): configProperties is currently unused here — confirm whether statsbeat
// should honor any configuration values.
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
    return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
/**
 * Creates the shared {@link TelemetryItemExporter}, with disk-backed retry when a temp
 * directory is available and a no-op pipeline listener otherwise.
 */
private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
    // Bug fix: use the pipeline built by internalBuildAndFreeze() (builtHttpPipeline). The raw
    // 'httpPipeline' field is null unless the caller supplied a custom pipeline, which would
    // break the common case where the pipeline is constructed by createHttpPipeline().
    TelemetryPipeline telemetryPipeline = new TelemetryPipeline(builtHttpPipeline, statsbeatModule);
    File tempDir =
        TempDirs.getApplicationInsightsTempDir(
            LOGGER,
            "Telemetry will not be stored to disk and retried on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        // Spool up to 50 (units per LocalStorageTelemetryPipelineListener) under <tempDir>/telemetry.
        telemetryItemExporter =
            new TelemetryItemExporter(
                telemetryPipeline,
                new LocalStorageTelemetryPipelineListener(
                    50,
                    TempDirs.getSubDir(tempDir, "telemetry"),
                    telemetryPipeline,
                    LocalStorageStats.noop(),
                    false));
    } else {
        telemetryItemExporter = new TelemetryItemExporter(
            telemetryPipeline,
            TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
} |
In theory the timeout could be shortened further, but 2 seconds seems reasonable to prevent a slow service response from being confused with fault injection. | private HttpClient getFaultInjectingWrappedHttpClient() {
switch (ENVIRONMENT.getHttpClientType()) {
case NETTY:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(NettyAsyncHttpClientProvider.class));
case OK_HTTP:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(OkHttpAsyncClientProvider.class));
default:
throw new IllegalArgumentException("Unknown http client type: " + ENVIRONMENT.getHttpClientType());
}
} | .readTimeout(Duration.ofSeconds(2)) | private HttpClient getFaultInjectingWrappedHttpClient() {
switch (ENVIRONMENT.getHttpClientType()) {
case NETTY:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(NettyAsyncHttpClientProvider.class));
case OK_HTTP:
return HttpClient.createDefault(new HttpClientOptions()
.readTimeout(Duration.ofSeconds(2))
.responseTimeout(Duration.ofSeconds(2))
.setHttpClientProvider(OkHttpAsyncClientProvider.class));
default:
throw new IllegalArgumentException("Unknown http client type: " + ENVIRONMENT.getHttpClientType());
}
} | class HttpFaultInjectingTests extends BlobTestBase {
private static final ClientLogger LOGGER = new ClientLogger(HttpFaultInjectingTests.class);
// Header names understood by the local HTTP fault injector proxy.
private static final HttpHeaderName UPSTREAM_URI_HEADER = HttpHeaderName.fromString("X-Upstream-Base-Uri");
private static final HttpHeaderName HTTP_FAULT_INJECTOR_RESPONSE_HEADER
    = HttpHeaderName.fromString("x-ms-faultinjector-response-option");
/**
 * Tests downloading to file with fault injection.
 *
 * This test will upload a single blob of about 9MB and then download it in parallel 500 times. Each download will
 * have its file contents compared to the original blob data. The test only cares about files that were properly
 * downloaded, if a download fails with a network error it will be ignored. A requirement of 90% of files being
 * successfully downloaded is also a requirement to prevent a case where most files failed to download and passing,
 * hiding a true issue.
 */
@Test
public void downloadToFileWithFaultInjection() throws IOException {
    // ~9 MB of random data so each download spans multiple transfer chunks.
    byte[] realFileBytes = new byte[9 * Constants.MB - 1];
    ThreadLocalRandom.current().nextBytes(realFileBytes);
    String blobName = generateBlobName();
    cc.getBlobClient(blobName).upload(BinaryData.fromBytes(realFileBytes), true);
    // Route requests through the fault-injecting client with a fixed 4-attempt retry policy.
    BlobClient downloadClient = new BlobClientBuilder()
        .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
        .containerName(cc.getBlobContainerName())
        .blobName(blobName)
        .httpClient(new HttpFaultInjectingHttpClient(getFaultInjectingWrappedHttpClient()))
        .retryOptions(new RequestRetryOptions(RetryPolicyType.FIXED, 4, null, 10L, 10L, null))
        .buildClient();
    List<File> files = new ArrayList<>(500);
    for (int i = 0; i < 500; i++) {
        File file = File.createTempFile(UUID.randomUUID().toString() + i, ".txt");
        file.deleteOnExit();
        files.add(file);
    }
    AtomicInteger successCount = new AtomicInteger();
    files.stream().parallel().forEach(it -> {
        try {
            downloadClient.downloadToFile(it.getAbsolutePath(), true);
            byte[] actualFileBytes = Files.readAllBytes(it.toPath());
            TestUtils.assertArraysEqual(realFileBytes, actualFileBytes);
            successCount.incrementAndGet();
            Files.deleteIfExists(it.toPath());
        } catch (Exception ex) {
            // Injected network faults are expected; only clean downloads are counted.
            LOGGER.atWarning().log(() -> "Failed to complete download, target download file: "
                + it.getAbsolutePath(), ex);
        }
    });
    // 450/500 is the 90% success floor described in the Javadoc.
    assertTrue(successCount.get() >= 450);
    files.forEach(it -> {
        try {
            Files.deleteIfExists(it.toPath());
        } catch (IOException e) {
            LOGGER.atWarning().log(() -> "Failed to delete file: " + it.getAbsolutePath(), e);
        }
    });
}
/**
 * {@link HttpClient} decorator that reroutes every request to a local fault injector
 * (localhost:7777), passing the real target via {@code X-Upstream-Base-Uri} and selecting a
 * fault mode via {@code x-ms-faultinjector-response-option}.
 */
private static final class HttpFaultInjectingHttpClient implements HttpClient {
    private final HttpClient wrappedHttpClient;
    HttpFaultInjectingHttpClient(HttpClient wrappedHttpClient) {
        this.wrappedHttpClient = wrappedHttpClient;
    }
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }
    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        // Record the real destination in a header, then point the request at the injector.
        URL originalUrl = request.getUrl();
        request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
        String faultType = faultInjectorHandling();
        request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
        return wrappedHttpClient.send(request, context)
            .map(response -> {
                // Restore the original URL/headers so retries target the real endpoint.
                HttpRequest request1 = response.getRequest();
                request1.getHeaders().remove(UPSTREAM_URI_HEADER);
                request1.setUrl(originalUrl);
                return response;
            });
    }
    @Override
    public HttpResponse sendSync(HttpRequest request, Context context) {
        URL originalUrl = request.getUrl();
        request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
        String faultType = faultInjectorHandling();
        request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
        HttpResponse response = wrappedHttpClient.sendSync(request, context);
        response.getRequest().setUrl(originalUrl);
        response.getRequest().getHeaders().remove(UPSTREAM_URI_HEADER);
        return response;
    }
    // Redirects the request to the local fault injector proxy.
    private static URL rewriteUrl(URL originalUrl) {
        try {
            return UrlBuilder.parse(originalUrl)
                .setScheme("http")
                .setHost("localhost")
                .setPort(7777)
                .toUrl();
        } catch (MalformedURLException e) {
            throw new RuntimeException(e);
        }
    }
    // Picks the injector response option: ~75% "f", ~24% the "n*" family, ~1% the "p*" family.
    // The option meanings are defined by the fault injector — confirm against its docs.
    private static String faultInjectorHandling() {
        double random = ThreadLocalRandom.current().nextDouble();
        int choice = (int) (random * 100);
        if (choice >= 25) {
            return "f";
        } else if (choice >= 1) {
            // NOTE(review): here choice is in [1, 25), so random < 0.25 and the "nc"/"na"
            // branches are unreachable — confirm the intended distribution.
            if (random <= 0.34D) {
                return "n";
            } else if (random <= 0.67D) {
                return "nc";
            } else {
                return "na";
            }
        } else {
            // NOTE(review): here random < 0.01, so only "p" is ever returned; the
            // "pc"/"pa"/"pn" branches are unreachable — confirm the intended distribution.
            if (random <= 0.25D) {
                return "p";
            } else if (random <= 0.50D) {
                return "pc";
            } else if (random <= 0.75D) {
                return "pa";
            } else {
                return "pn";
            }
        }
    }
}
} | class HttpFaultInjectingTests {
private static final ClientLogger LOGGER = new ClientLogger(HttpFaultInjectingTests.class);
// Header names understood by the local HTTP fault injector proxy.
private static final HttpHeaderName UPSTREAM_URI_HEADER = HttpHeaderName.fromString("X-Upstream-Base-Uri");
private static final HttpHeaderName HTTP_FAULT_INJECTOR_RESPONSE_HEADER
    = HttpHeaderName.fromString("x-ms-faultinjector-response-option");
// Created fresh per test in setup() and deleted in teardown().
private BlobContainerClient containerClient;
@BeforeEach
public void setup() {
    // Unique, lowercase name to satisfy blob container naming rules.
    String testName = ("httpFaultInjectingTests" + CoreUtils.randomUuid().toString().replace("-", ""))
        .toLowerCase();
    containerClient = new BlobServiceClientBuilder()
        .endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
        .credential(ENVIRONMENT.getPrimaryAccount().getCredential())
        // This test suite must never run in playback mode.
        .httpClient(BlobTestBase.getHttpClient(() -> {
            throw new RuntimeException("Test should not run during playback.");
        }))
        .buildClient()
        .createBlobContainer(testName);
}
@AfterEach
public void teardown() {
    // Null-safe: setup() may have failed before the container was created.
    if (containerClient != null) {
        containerClient.delete();
    }
}
/**
 * Tests downloading to file with fault injection.
 * <p>
 * This test will upload a single blob of about 9MB and then download it in parallel 500 times. Each download will
 * have its file contents compared to the original blob data. The test only cares about files that were properly
 * downloaded, if a download fails with a network error it will be ignored. A requirement of 90% of files being
 * successfully downloaded is also a requirement to prevent a case where most files failed to download and passing,
 * hiding a true issue.
 */
@Test
public void downloadToFileWithFaultInjection() throws IOException, InterruptedException {
    // ~9 MB of random data so each download spans multiple transfer chunks.
    byte[] realFileBytes = new byte[9 * Constants.MB - 1];
    ThreadLocalRandom.current().nextBytes(realFileBytes);
    // The container name doubles as the blob name here.
    containerClient.getBlobClient(containerClient.getBlobContainerName())
        .upload(BinaryData.fromBytes(realFileBytes), true);
    // Route requests through the fault-injecting client with a fixed 4-attempt retry policy.
    BlobClient downloadClient = new BlobClientBuilder()
        .endpoint(ENVIRONMENT.getPrimaryAccount().getBlobEndpoint())
        .containerName(containerClient.getBlobContainerName())
        .blobName(containerClient.getBlobContainerName())
        .credential(ENVIRONMENT.getPrimaryAccount().getCredential())
        .httpClient(new HttpFaultInjectingHttpClient(getFaultInjectingWrappedHttpClient()))
        .retryOptions(new RequestRetryOptions(RetryPolicyType.FIXED, 4, null, 10L, 10L, null))
        .buildClient();
    List<File> files = new ArrayList<>(500);
    for (int i = 0; i < 500; i++) {
        File file = File.createTempFile(UUID.randomUUID().toString() + i, ".txt");
        file.deleteOnExit();
        files.add(file);
    }
    AtomicInteger successCount = new AtomicInteger();
    Set<OpenOption> overwriteOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING,
        StandardOpenOption.READ, StandardOpenOption.WRITE));
    // Bounded parallelism: one thread per core, all downloads submitted at once.
    ExecutorService executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    executorService.invokeAll(files.stream().map(it -> (Callable<Void>) () -> {
        try {
            downloadClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(it.getAbsolutePath())
                .setOpenOptions(overwriteOptions)
                .setParallelTransferOptions(new ParallelTransferOptions().setMaxConcurrency(2)),
                null, Context.NONE);
            byte[] actualFileBytes = Files.readAllBytes(it.toPath());
            TestUtils.assertArraysEqual(realFileBytes, actualFileBytes);
            successCount.incrementAndGet();
            Files.deleteIfExists(it.toPath());
        } catch (Exception ex) {
            // Injected network faults are expected; only clean downloads are counted.
            LOGGER.atWarning()
                .log(() -> "Failed to complete download, target download file: " + it.getAbsolutePath(), ex);
        }
        return null;
    }).collect(Collectors.toList()));
    executorService.shutdown();
    executorService.awaitTermination(10, TimeUnit.MINUTES);
    // 450/500 is the 90% success floor described in the Javadoc.
    assertTrue(successCount.get() >= 450);
    files.forEach(it -> {
        try {
            Files.deleteIfExists(it.toPath());
        } catch (IOException e) {
            LOGGER.atWarning().log(() -> "Failed to delete file: " + it.getAbsolutePath(), e);
        }
    });
}
/**
 * {@link HttpClient} decorator that reroutes every request to a local fault injector
 * (localhost:7777), passing the real target via {@code X-Upstream-Base-Uri} and selecting a
 * fault mode via {@code x-ms-faultinjector-response-option}.
 */
private static final class HttpFaultInjectingHttpClient implements HttpClient {
    private final HttpClient wrappedHttpClient;
    HttpFaultInjectingHttpClient(HttpClient wrappedHttpClient) {
        this.wrappedHttpClient = wrappedHttpClient;
    }
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }
    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        // Record the real destination in a header, then point the request at the injector.
        URL originalUrl = request.getUrl();
        request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
        String faultType = faultInjectorHandling();
        request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
        return wrappedHttpClient.send(request, context)
            .map(response -> {
                // Restore the original URL/headers so retries target the real endpoint.
                HttpRequest request1 = response.getRequest();
                request1.getHeaders().remove(UPSTREAM_URI_HEADER);
                request1.setUrl(originalUrl);
                return response;
            });
    }
    @Override
    public HttpResponse sendSync(HttpRequest request, Context context) {
        URL originalUrl = request.getUrl();
        request.setHeader(UPSTREAM_URI_HEADER, originalUrl.toString()).setUrl(rewriteUrl(originalUrl));
        String faultType = faultInjectorHandling();
        request.setHeader(HTTP_FAULT_INJECTOR_RESPONSE_HEADER, faultType);
        HttpResponse response = wrappedHttpClient.sendSync(request, context);
        response.getRequest().setUrl(originalUrl);
        response.getRequest().getHeaders().remove(UPSTREAM_URI_HEADER);
        return response;
    }
    // Redirects the request to the local fault injector proxy.
    private static URL rewriteUrl(URL originalUrl) {
        try {
            return UrlBuilder.parse(originalUrl)
                .setScheme("http")
                .setHost("localhost")
                .setPort(7777)
                .toUrl();
        } catch (MalformedURLException e) {
            throw new RuntimeException(e);
        }
    }
    // Picks the injector response option: ~75% "f", ~24% the "n*" family, ~1% the "p*" family.
    // The option meanings are defined by the fault injector — confirm against its docs.
    private static String faultInjectorHandling() {
        double random = ThreadLocalRandom.current().nextDouble();
        int choice = (int) (random * 100);
        if (choice >= 25) {
            return "f";
        } else if (choice >= 1) {
            // NOTE(review): here choice is in [1, 25), so random < 0.25 and the "nc"/"na"
            // branches are unreachable — confirm the intended distribution.
            if (random <= 0.34D) {
                return "n";
            } else if (random <= 0.67D) {
                return "nc";
            } else {
                return "na";
            }
        } else {
            // NOTE(review): here random < 0.01, so only "p" is ever returned; the
            // "pc"/"pa"/"pn" branches are unreachable — confirm the intended distribution.
            if (random <= 0.25D) {
                return "p";
            } else if (random <= 0.50D) {
                return "pc";
            } else if (random <= 0.75D) {
                return "pa";
            } else {
                return "pn";
            }
        }
    }
}
// Fault-injection runs require LIVE test mode and a non-macOS host.
private static boolean shouldRun() {
    if (ENVIRONMENT.getTestMode() != TestMode.LIVE) {
        return false;
    }
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    boolean isMac = os.contains("mac os") || os.contains("darwin");
    return !isMac;
}
} |
Also worth adding the low-bound comparison, like `duration.getSeconds() > timeout based on the httpTimeoutPolicy`. By comparing only the high bound, the check will not catch the issue. | private void validateDataPlaneRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList = diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
.stream()
.map(ClientSideRequestStatistics::getGatewayStatisticsList)
.flatMap(Collection::stream)
.collect(Collectors.toList());
for (ClientSideRequestStatistics.GatewayStatistics gs : gatewayStatisticsList) {
for (RequestTimeline.Event event : gs.getRequestTimeline()) {
Duration durationInMillis = event.getDuration();
if (durationInMillis != null) {
assertThat(durationInMillis.getSeconds()).isLessThanOrEqualTo(62);
}
}
}
} | assertThat(durationInMillis.getSeconds()).isLessThanOrEqualTo(62); | private void validateDataPlaneRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList = diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
.stream()
.map(ClientSideRequestStatistics::getGatewayStatisticsList)
.flatMap(Collection::stream)
.collect(Collectors.toList());
for (ClientSideRequestStatistics.GatewayStatistics gs : gatewayStatisticsList) {
if (gs.getStatusCode() == HttpConstants.StatusCodes.REQUEST_TIMEOUT) {
for (RequestTimeline.Event event : gs.getRequestTimeline()) {
Duration durationInMillis = event.getDuration();
if (durationInMillis != null) {
assertThat(durationInMillis.getSeconds()).isLessThanOrEqualTo(62);
assertThat(durationInMillis.getSeconds()).isGreaterThanOrEqualTo(60);
}
}
}
}
} | class WebExceptionRetryPolicyE2ETests extends TestSuiteBase {
// Accessor into diagnostics internals used by the timeout validations below.
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Initialized in beforeClass(), closed in afterClass().
private CosmosAsyncClient cosmosAsyncClient;
private CosmosAsyncContainer cosmosAsyncContainer;
// Instantiated once per client builder supplied by the TestNG factory data provider.
@Factory(dataProvider = "clientBuildersWithSessionConsistency")
public WebExceptionRetryPolicyE2ETests(CosmosClientBuilder clientBuilder) {
    super(clientBuilder);
    this.subscriberValidationTimeout = TIMEOUT;
}
@BeforeClass(groups = {"multi-master"}, timeOut = TIMEOUT)
public void beforeClass() {
    // Shared client/container for the whole class; torn down in afterClass().
    this.cosmosAsyncClient = getClientBuilder().buildAsyncClient();
    this.cosmosAsyncContainer = getSharedMultiPartitionCosmosContainerWithIdAsPartitionKey(cosmosAsyncClient);
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    // safeClose handles a null client, so this is safe even if beforeClass() failed.
    safeClose(cosmosAsyncClient);
}
// Pairs of (fault-injection operation type, SDK operation type) fed to
// dataPlaneRequestHttpTimeout.
@DataProvider(name = "operationTypeProvider")
public static Object[][] operationTypeProvider() {
    return new Object[][]{
        {FaultInjectionOperationType.READ_ITEM, OperationType.Read}
    };
}
@Test(groups = {"multi-master"}, timeOut = TIMEOUT)
public void addressRefreshHttpTimeout() {
    // Only meaningful in DIRECT mode, where address-refresh metadata requests exist.
    if (BridgeInternal
        .getContextClient(this.cosmosAsyncClient)
        .getConnectionPolicy()
        .getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("addressRefreshHttpTimeout() is only meant for DIRECT mode");
    }
    TestItem newItem = TestItem.createNewItem();
    this.cosmosAsyncContainer.createItem(newItem).block();
    // Delay the first 4 address-refresh responses by 14s so each attempt times out.
    FaultInjectionRule addressRefreshDelayRule = new FaultInjectionRuleBuilder("addressRefreshDelayRule")
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(FaultInjectionOperationType.METADATA_REQUEST_ADDRESS_REFRESH)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
                .delay(Duration.ofSeconds(14))
                .times(4)
                .build()
        )
        .build();
    // Force GONE on reads so the SDK is driven into the address-refresh path.
    FaultInjectionRule serverGoneRule = new FaultInjectionRuleBuilder("serverGoneRule")
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(FaultInjectionOperationType.READ_ITEM)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.GONE)
                .times(4)
                .build()
        )
        .build();
    CosmosFaultInjectionHelper
        .configureFaultInjectionRules(
            cosmosAsyncContainer,
            Arrays.asList(addressRefreshDelayRule, serverGoneRule)).block();
    try {
        cosmosAsyncContainer
            .readItem(newItem.getId(), new PartitionKey(newItem.getId()), TestItem.class)
            .block();
        fail("addressRefreshHttpTimeout() should fail due to addressRefresh timeout");
    } catch (CosmosException e) {
        // NOTE(review): message names dataPlaneRequestHttpTimeout() — likely copy/paste.
        System.out.println("dataPlaneRequestHttpTimeout() Diagnostics " + " " + e.getDiagnostics());
        assertThat(e.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.REQUEST_TIMEOUT);
        assertThat(e.getSubStatusCode()).isEqualTo(HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
        validateAddressRefreshRetryPolicyResponseTimeouts(e.getDiagnostics());
    } finally {
        // Always disable rules so later tests are unaffected.
        addressRefreshDelayRule.disable();
        serverGoneRule.disable();
    }
}
@Test(groups = {"multi-master"}, dataProvider = "operationTypeProvider", timeOut = 8 * TIMEOUT)
public void dataPlaneRequestHttpTimeout(
    FaultInjectionOperationType faultInjectionOperationType,
    OperationType operationType) {
    // Gateway-only scenario; the delay is injected on the gateway connection type below.
    if (BridgeInternal
        .getContextClient(this.cosmosAsyncClient)
        .getConnectionPolicy()
        .getConnectionMode() != ConnectionMode.GATEWAY) {
        throw new SkipException("queryPlanHttpTimeoutWillNotMarkRegionUnavailable() is only meant for GATEWAY mode");
    }
    TestItem newItem = TestItem.createNewItem();
    this.cosmosAsyncContainer.createItem(newItem).block();
    // Delay the first 4 responses by 66s so each attempt exceeds the HTTP timeout and the
    // retry policy kicks in; the operation should eventually succeed.
    FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(faultInjectionOperationType)
                .connectionType(FaultInjectionConnectionType.GATEWAY)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
                .delay(Duration.ofSeconds(66))
                .times(4)
                .build()
        )
        .build();
    CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();
    try {
        CosmosDiagnostics cosmosDiagnostics =
            this.performDocumentOperation(cosmosAsyncContainer, operationType, newItem).block();
        System.out.println("dataPlaneRequestHttpTimeout() Diagnostics " + " " + cosmosDiagnostics);
        validateDataPlaneRetryPolicyResponseTimeouts(cosmosDiagnostics);
    } catch (Exception e) {
        fail("dataPlaneRequestHttpTimeout() should succeed for operationType " + operationType, e);
    } finally {
        requestHttpTimeoutRule.disable();
    }
}
// Validates the per-attempt durations of the address-resolution retries, in chronological
// order. The asserted upper bounds (600ms, 600ms, 6s, 11s) presumably track an escalating
// per-attempt timeout ladder — confirm against the address-refresh retry policy.
// NOTE(review): only upper bounds are asserted; a lower bound would also catch attempts
// that returned before the expected timeout elapsed.
private void validateAddressRefreshRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
    List<ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsList = diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
        .stream()
        .map(ClientSideRequestStatistics::getAddressResolutionStatistics)
        .flatMap(m -> m.values().stream())
        .sorted(Comparator.comparing(ClientSideRequestStatistics.AddressResolutionStatistics::getStartTimeUTC))
        .collect(Collectors.toList());
    assertThat(MILLIS.between(addressResolutionStatisticsList.get(0).getStartTimeUTC(), addressResolutionStatisticsList.get(0).getEndTimeUTC())).isLessThanOrEqualTo(600);
    assertThat(MILLIS.between(addressResolutionStatisticsList.get(1).getStartTimeUTC(), addressResolutionStatisticsList.get(1).getEndTimeUTC())).isLessThanOrEqualTo(600);
    assertThat(SECONDS.between(addressResolutionStatisticsList.get(2).getStartTimeUTC(), addressResolutionStatisticsList.get(2).getEndTimeUTC())).isLessThanOrEqualTo(6);
    assertThat(SECONDS.between(addressResolutionStatisticsList.get(3).getStartTimeUTC(), addressResolutionStatisticsList.get(3).getEndTimeUTC())).isLessThanOrEqualTo(11);
}
// Executes the given operation against the container and returns its diagnostics.
// NOTE(review): the Query and ReadFeed branches block (blockFirst) inside a Mono-returning
// method; consider returning the reactive pipeline instead.
private Mono<CosmosDiagnostics> performDocumentOperation(
    CosmosAsyncContainer cosmosAsyncContainer,
    OperationType operationType,
    TestItem createdItem) {
    if (operationType == OperationType.Query) {
        CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
        String query = String.format("SELECT * from c where c.id = '%s'", createdItem.getId());
        FeedResponse<TestItem> itemFeedResponse =
            cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst();
        return Mono.just(itemFeedResponse.getCosmosDiagnostics());
    }
    // Point operations: each branch maps the item response to its diagnostics.
    if (operationType == OperationType.Read
        || operationType == OperationType.Delete
        || operationType == OperationType.Replace
        || operationType == OperationType.Create
        || operationType == OperationType.Patch
        || operationType == OperationType.Upsert) {
        if (operationType == OperationType.Read) {
            return cosmosAsyncContainer
                .readItem(
                    createdItem.getId(),
                    new PartitionKey(createdItem.getId()),
                    TestItem.class
                )
                .map(itemResponse -> itemResponse.getDiagnostics());
        }
        if (operationType == OperationType.Replace) {
            return cosmosAsyncContainer
                .replaceItem(
                    createdItem,
                    createdItem.getId(),
                    new PartitionKey(createdItem.getId()))
                .map(itemResponse -> itemResponse.getDiagnostics());
        }
        if (operationType == OperationType.Delete) {
            return cosmosAsyncContainer.deleteItem(createdItem, null).map(itemResponse -> itemResponse.getDiagnostics());
        }
        if (operationType == OperationType.Create) {
            return cosmosAsyncContainer.createItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
        }
        if (operationType == OperationType.Upsert) {
            return cosmosAsyncContainer.upsertItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
        }
        if (operationType == OperationType.Patch) {
            CosmosPatchOperations patchOperations =
                CosmosPatchOperations
                    .create()
                    .add("newPath", "newPath");
            return cosmosAsyncContainer
                .patchItem(createdItem.getId(), new PartitionKey(createdItem.getId()), patchOperations, TestItem.class)
                .map(itemResponse -> itemResponse.getDiagnostics());
        }
    }
    if (operationType == OperationType.ReadFeed) {
        // Change feed from the beginning of the first feed range; only the first page's
        // diagnostics are needed.
        List<FeedRange> feedRanges = cosmosAsyncContainer.getFeedRanges().block();
        CosmosChangeFeedRequestOptions changeFeedRequestOptions =
            CosmosChangeFeedRequestOptions.createForProcessingFromBeginning(feedRanges.get(0));
        FeedResponse<TestItem> firstPage = cosmosAsyncContainer
            .queryChangeFeed(changeFeedRequestOptions, TestItem.class)
            .byPage()
            .blockFirst();
        return Mono.just(firstPage.getCosmosDiagnostics());
    }
    throw new IllegalArgumentException("The operation type is not supported");
}
} | class WebExceptionRetryPolicyE2ETests extends TestSuiteBase {
// SLF4J logger (this variant logs diagnostics instead of printing to stdout).
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicyE2ETests.class);
// Accessor into diagnostics internals used by the timeout validations.
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
// Initialized in beforeClass(), closed in afterClass().
private CosmosAsyncClient cosmosAsyncClient;
private CosmosAsyncContainer cosmosAsyncContainer;
// Instantiated once per client builder supplied by the TestNG factory data provider.
@Factory(dataProvider = "clientBuildersWithSessionConsistency")
public WebExceptionRetryPolicyE2ETests(CosmosClientBuilder clientBuilder) {
    super(clientBuilder);
    this.subscriberValidationTimeout = TIMEOUT;
}
@BeforeClass(groups = {"multi-master"}, timeOut = TIMEOUT)
public void beforeClass() {
    // Shared client/container for the whole class; torn down in afterClass().
    this.cosmosAsyncClient = getClientBuilder().buildAsyncClient();
    this.cosmosAsyncContainer = getSharedMultiPartitionCosmosContainerWithIdAsPartitionKey(cosmosAsyncClient);
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
    // safeClose handles a null client, so this is safe even if beforeClass() failed.
    safeClose(cosmosAsyncClient);
}
// Pairs of (fault-injection operation type, SDK operation type) fed to
// dataPlaneRequestHttpTimeout; this variant also covers Query.
@DataProvider(name = "operationTypeProvider")
public static Object[][] operationTypeProvider() {
    return new Object[][]{
        {FaultInjectionOperationType.READ_ITEM, OperationType.Read},
        {FaultInjectionOperationType.QUERY_ITEM, OperationType.Query}
    };
}
@Test(groups = {"multi-master"}, timeOut = TIMEOUT)
public void addressRefreshHttpTimeout() {
    // Only meaningful in DIRECT mode, where address-refresh metadata requests exist.
    if (BridgeInternal
        .getContextClient(this.cosmosAsyncClient)
        .getConnectionPolicy()
        .getConnectionMode() != ConnectionMode.DIRECT) {
        throw new SkipException("addressRefreshHttpTimeout() is only meant for DIRECT mode");
    }
    TestItem newItem = TestItem.createNewItem();
    this.cosmosAsyncContainer.createItem(newItem).block();
    // Delay the first 4 address-refresh responses by 14s so each attempt times out.
    FaultInjectionRule addressRefreshDelayRule = new FaultInjectionRuleBuilder("addressRefreshDelayRule")
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(FaultInjectionOperationType.METADATA_REQUEST_ADDRESS_REFRESH)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
                .delay(Duration.ofSeconds(14))
                .times(4)
                .build()
        )
        .build();
    // Force GONE on reads so the SDK is driven into the address-refresh path.
    FaultInjectionRule serverGoneRule = new FaultInjectionRuleBuilder("serverGoneRule")
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(FaultInjectionOperationType.READ_ITEM)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.GONE)
                .times(4)
                .build()
        )
        .build();
    CosmosFaultInjectionHelper
        .configureFaultInjectionRules(
            cosmosAsyncContainer,
            Arrays.asList(addressRefreshDelayRule, serverGoneRule)).block();
    try {
        cosmosAsyncContainer
            .readItem(newItem.getId(), new PartitionKey(newItem.getId()), TestItem.class)
            .block();
        fail("addressRefreshHttpTimeout() should fail due to addressRefresh timeout");
    } catch (CosmosException e) {
        // NOTE(review): message names dataPlaneRequestHttpTimeout() — likely copy/paste.
        logger.info("dataPlaneRequestHttpTimeout() Diagnostics " + " " + e.getDiagnostics());
        assertThat(e.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.REQUEST_TIMEOUT);
        assertThat(e.getSubStatusCode()).isEqualTo(HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
        validateAddressRefreshRetryPolicyResponseTimeouts(e.getDiagnostics());
    } finally {
        // Always disable rules so later tests are unaffected.
        addressRefreshDelayRule.disable();
        serverGoneRule.disable();
    }
}
// Verifies that injected gateway response delays (66s, 3 times) on data-plane requests
// are absorbed by the HTTP timeout retry policy so the operation ultimately succeeds.
// GATEWAY mode only.
@Test(groups = {"multi-master"}, dataProvider = "operationTypeProvider", timeOut = 8 * TIMEOUT)
public void dataPlaneRequestHttpTimeout(
    FaultInjectionOperationType faultInjectionOperationType,
    OperationType operationType) {
    if (BridgeInternal
        .getContextClient(this.cosmosAsyncClient)
        .getConnectionPolicy()
        .getConnectionMode() != ConnectionMode.GATEWAY) {
        // Fixed: the skip message previously named an unrelated test
        // ("queryPlanHttpTimeoutWillNotMarkRegionUnavailable()").
        throw new SkipException("dataPlaneRequestHttpTimeout() is only meant for GATEWAY mode");
    }

    TestItem newItem = TestItem.createNewItem();
    this.cosmosAsyncContainer.createItem(newItem).block();

    // Delay the targeted operation three times; the retry policy is expected to recover.
    FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(faultInjectionOperationType)
                .connectionType(FaultInjectionConnectionType.GATEWAY)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
                .delay(Duration.ofSeconds(66))
                .times(3)
                .build()
        )
        .build();

    CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();

    try {
        CosmosDiagnostics cosmosDiagnostics =
            this.performDocumentOperation(cosmosAsyncContainer, operationType, newItem).block();
        logger.info("dataPlaneRequestHttpTimeout() Diagnostics {}", cosmosDiagnostics);
        validateDataPlaneRetryPolicyResponseTimeouts(cosmosDiagnostics);
    } catch (Exception e) {
        fail("dataPlaneRequestHttpTimeout() should succeed for operationType " + operationType, e);
    } finally {
        requestHttpTimeoutRule.disable();
    }
}
// Verifies that an injected gateway response delay (66s, twice) on a create fails the
// write with REQUEST_TIMEOUT — writes are not transparently retried on HTTP timeouts.
// GATEWAY mode only.
@Test(groups = {"multi-master"}, timeOut = 8 * TIMEOUT)
public void writeOperationRequestHttpTimeout() {
    if (BridgeInternal
        .getContextClient(this.cosmosAsyncClient)
        .getConnectionPolicy()
        .getConnectionMode() != ConnectionMode.GATEWAY) {
        // Fixed: the skip message previously named an unrelated test.
        throw new SkipException("writeOperationRequestHttpTimeout() is only meant for GATEWAY mode");
    }

    TestItem newItem = TestItem.createNewItem();
    this.cosmosAsyncContainer.createItem(newItem).block();

    FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(FaultInjectionOperationType.CREATE_ITEM)
                .connectionType(FaultInjectionConnectionType.GATEWAY)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
                .delay(Duration.ofSeconds(66))
                .times(2)
                .build()
        )
        .build();

    CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();

    try {
        // Fixed: the result was previously captured in an unused local variable.
        this.performDocumentOperation(cosmosAsyncContainer, OperationType.Create, newItem).block();
        fail("writeOperationRequestHttpTimeout() should fail for operationType " + OperationType.Create);
    } catch (CosmosException e) {
        logger.info("writeOperationRequestHttpTimeout() Diagnostics {}", e.getDiagnostics());
        assertThat(e.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.REQUEST_TIMEOUT);
    } finally {
        requestHttpTimeoutRule.disable();
    }
}
// Verifies that an injected connection-establishment delay (66s, 3 times) on a create
// is recovered from and the operation ultimately succeeds. GATEWAY mode only.
@Test(groups = {"multi-master"}, timeOut = 8 * TIMEOUT)
public void writeOperationConnectionTimeout() {
    if (BridgeInternal
        .getContextClient(this.cosmosAsyncClient)
        .getConnectionPolicy()
        .getConnectionMode() != ConnectionMode.GATEWAY) {
        // Fixed: the skip message previously named an unrelated test.
        throw new SkipException("writeOperationConnectionTimeout() is only meant for GATEWAY mode");
    }

    TestItem newItem = TestItem.createNewItem();
    this.cosmosAsyncContainer.createItem(newItem).block();

    FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
        .condition(
            new FaultInjectionConditionBuilder()
                .operationType(FaultInjectionOperationType.CREATE_ITEM)
                .connectionType(FaultInjectionConnectionType.GATEWAY)
                .build())
        .result(
            FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.CONNECTION_DELAY)
                .delay(Duration.ofSeconds(66))
                .times(3)
                .build()
        )
        .build();

    CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();

    try {
        CosmosDiagnostics cosmosDiagnostics =
            this.performDocumentOperation(cosmosAsyncContainer, OperationType.Create, newItem).block();
        logger.info("writeOperationConnectionTimeout() Diagnostics {}", cosmosDiagnostics);
    } catch (CosmosException e) {
        // Fixed: an assertThat previously followed fail(...) and was unreachable (fail throws).
        // Also pass the exception so the failure report carries the cause.
        fail("writeOperationConnectionTimeout() should pass for operationType " + OperationType.Create, e);
    } finally {
        requestHttpTimeoutRule.disable();
    }
}
// Asserts the escalating per-attempt durations of the address-refresh retry policy as
// recorded in the diagnostics: attempt 1 bounded by 600ms, attempt 2 by 6s, attempt 3 by 11s.
private void validateAddressRefreshRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
    List<ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsList =
        diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
            .stream()
            .map(ClientSideRequestStatistics::getAddressResolutionStatistics)
            .flatMap(m -> m.values().stream())
            .sorted(Comparator.comparing(ClientSideRequestStatistics.AddressResolutionStatistics::getStartTimeUTC))
            .collect(Collectors.toList());

    // Fixed: guard the size first — indexing 0..2 below previously threw a raw
    // IndexOutOfBoundsException (instead of a meaningful assertion failure) when
    // fewer than three address-resolution attempts were recorded.
    assertThat(addressResolutionStatisticsList.size()).isGreaterThanOrEqualTo(3);

    assertThat(MILLIS.between(addressResolutionStatisticsList.get(0).getStartTimeUTC(), addressResolutionStatisticsList.get(0).getEndTimeUTC())).isLessThanOrEqualTo(600);
    assertThat(SECONDS.between(addressResolutionStatisticsList.get(1).getStartTimeUTC(), addressResolutionStatisticsList.get(1).getEndTimeUTC())).isLessThanOrEqualTo(6);
    assertThat(SECONDS.between(addressResolutionStatisticsList.get(2).getStartTimeUTC(), addressResolutionStatisticsList.get(2).getEndTimeUTC())).isLessThanOrEqualTo(11);
}
// Executes one operation of the given type against the container and returns its diagnostics.
// NOTE(review): the Query and ReadFeed branches execute eagerly (blockFirst()/block()) and wrap
// the already-obtained diagnostics in Mono.just, unlike the other lazily-deferred branches —
// confirm this mixed eager/lazy behavior is intentional for these tests.
private Mono<CosmosDiagnostics> performDocumentOperation(
CosmosAsyncContainer cosmosAsyncContainer,
OperationType operationType,
TestItem createdItem) {
switch(operationType) {
case Query:
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
String query = String.format("SELECT * from c where c.id = '%s'", createdItem.getId());
FeedResponse<TestItem> itemFeedResponse =
cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst();
return Mono.just(itemFeedResponse.getCosmosDiagnostics());
case Read:
return cosmosAsyncContainer
.readItem(
createdItem.getId(),
new PartitionKey(createdItem.getId()),
TestItem.class
)
.map(itemResponse -> itemResponse.getDiagnostics());
case Replace:
return cosmosAsyncContainer
.replaceItem(
createdItem,
createdItem.getId(),
new PartitionKey(createdItem.getId()))
.map(itemResponse -> itemResponse.getDiagnostics());
case Delete:
return cosmosAsyncContainer.deleteItem(createdItem, null).map(itemResponse -> itemResponse.getDiagnostics());
case Create:
// Create/Upsert use a fresh item rather than createdItem (which already exists).
return cosmosAsyncContainer.createItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
case Upsert:
return cosmosAsyncContainer.upsertItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
case Patch:
CosmosPatchOperations patchOperations =
CosmosPatchOperations
.create()
.add("newPath", "newPath");
return cosmosAsyncContainer
.patchItem(createdItem.getId(), new PartitionKey(createdItem.getId()), patchOperations, TestItem.class)
.map(itemResponse -> itemResponse.getDiagnostics());
case ReadFeed:
// Change feed over the first feed range only, processing from the beginning.
List<FeedRange> feedRanges = cosmosAsyncContainer.getFeedRanges().block();
CosmosChangeFeedRequestOptions changeFeedRequestOptions =
CosmosChangeFeedRequestOptions.createForProcessingFromBeginning(feedRanges.get(0));
FeedResponse<TestItem> firstPage = cosmosAsyncContainer
.queryChangeFeed(changeFeedRequestOptions, TestItem.class)
.byPage()
.blockFirst();
return Mono.just(firstPage.getCosmosDiagnostics());
}
throw new IllegalArgumentException("The operation type is not supported");
}
} |
This can cause an IndexOutOfBoundsException; it is worth checking that the list is not empty before dereferencing it. | public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
// Build the outgoing HTTP request from the resolved URI, merged headers, and payload.
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
// Default the response timeout from the request's timeout policy when the caller did not set one.
// NOTE(review): get(0) assumes getTimeoutAndDelaysList() is never empty — an empty list would
// throw IndexOutOfBoundsException here; worth guarding before dereferencing.
if (request.getResponseTimeout() == null) {
request.setResponseTimeout(HttpTimeoutPolicy.getTimeoutPolicy(request).
getTimeoutAndDelaysList().get(0).getResponseTimeout());
}
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, request.getResponseTimeout());
// When fault injection is configured, route the response through the injector first.
if (this.gatewayServerErrorInjector != null) {
httpResponseMono = this.gatewayServerErrorInjector.injectGatewayErrors(request.getResponseTimeout(),
httpRequest, request, httpResponseMono);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
}
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
// Surface synchronous failures through the reactive pipeline instead of throwing.
return Mono.error(e);
}
} | getTimeoutAndDelaysList().get(0).getResponseTimeout()); | public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
// Build the outgoing HTTP request from the resolved URI, merged headers, and payload.
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
// The request's response timeout is used as-is here (it may be null; the client
// then applies its own default) — the timeout-policy defaulting present in the
// earlier version of this method was removed.
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, request.getResponseTimeout());
// When fault injection is configured, route the response through the injector first.
if (this.gatewayServerErrorInjector != null) {
httpResponseMono = this.gatewayServerErrorInjector.injectGatewayErrors(request.getResponseTimeout(),
httpRequest, request, httpResponseMono);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
}
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
// Surface synchronous failures through the reactive pipeline instead of throwing.
return Mono.error(e);
}
} | class RxGatewayStoreModel implements RxStoreModel {
// Shared empty payload used for responses that carry no body.
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
// Headers applied to every outgoing request unless overridden per request.
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
// Set only when throughput control is enabled (see enableThroughputControl).
private ThroughputControlStore throughputControlStore;
private boolean useMultipleWriteLocations;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private GatewayServiceConfigurationReader gatewayServiceConfigurationReader;
private RxClientCollectionCache collectionCache;
// Created lazily when a fault injector provider is configured (see configureFaultInjectorProvider).
private GatewayServerErrorInjector gatewayServerErrorInjector;
/**
 * Creates the gateway store model and seeds the default headers (cache-control, API
 * version, supported capabilities, optional API type, user agent, and — when a default
 * consistency level is supplied — the consistency-level header) sent on every request.
 */
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
this.defaultHeaders.put(
HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
if (apiType != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
// Fall back to a default user agent when none was provided.
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
// Wiring accessors: these are invoked during client initialization to attach caches and
// configuration readers that are constructed after this store model.
void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) {
this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader;
}
public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) {
this.partitionKeyRangeCache = partitionKeyRangeCache;
}
public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) {
this.useMultipleWriteLocations = useMultipleWriteLocations;
}
boolean isUseMultipleWriteLocations() {
return useMultipleWriteLocations;
}
RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() {
return gatewayServiceConfigurationReader;
}
RxClientCollectionCache getCollectionCache() {
return collectionCache;
}
public void setCollectionCache(RxClientCollectionCache collectionCache) {
this.collectionCache = collectionCache;
}
// Thin per-operation wrappers mapping each logical operation to its HTTP verb.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
// Delete-by-partition-key is a POST (bulk operation), not a DELETE.
private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
/**
 * Issues a query as a POST: marks data-plane queries with the is-query header
 * (query-plan requests are excluded) and picks the request content type from the
 * configured query compatibility mode.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    if (request.getOperationType() != OperationType.QueryPlan) {
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
    }

    // SqlQuery mode sends raw SQL; Default/Query (and anything else) send query JSON.
    String contentType = this.queryCompatibilityMode == QueryCompatibilityMode.SqlQuery
        ? RuntimeConstants.MediaTypes.SQL
        : RuntimeConstants.MediaTypes.QUERY_JSON;
    request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, contentType);

    return this.performRequest(request, HttpMethod.POST);
}
// Resolves the target URI, ensures a diagnostics instance is attached, and executes the
// request — routed through the throughput control store when one is configured.
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
try {
if (request.requestContext.cosmosDiagnostics == null) {
request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
}
URI uri = getUri(request);
request.requestContext.resourcePhysicalAddress = uri.toString();
if (this.throughputControlStore != null) {
// Mono.defer so the actual HTTP call is issued only when throughput control admits it.
return this.throughputControlStore.processRequest(request, Mono.defer(() -> this.performRequestInternal(request, method, uri)));
}
return this.performRequestInternal(request, method, uri);
} catch (Exception e) {
// Surface synchronous failures (e.g. URI resolution) through the reactive pipeline.
return Mono.error(e);
}
}
/**
 * Given the request, creates a Flux which, upon subscription, issues the HTTP call and emits one RxDocumentServiceResponse.
 * (NOTE: this Javadoc describes performRequestInternal; it appears to have become detached from that method.)
 *
 * @param request
 * @param method
 * @param requestUri
 * @return Flux<RxDocumentServiceResponse>
 */
/**
 * Merges the client's default headers with the per-request headers. Request headers win
 * over defaults; a null header value is normalized to an empty string.
 *
 * @param headers per-request headers; may be null or empty.
 * @return the merged headers for the outgoing HTTP request.
 */
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
    HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());

    // Fixed: the original called headers.containsKey(...) BEFORE its null check, so the
    // null check was dead and a null map would have thrown NPE. Handle null up front.
    if (headers == null) {
        for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
        return httpHeaders;
    }

    // Copy defaults first, skipping any key the request overrides.
    for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
        if (!headers.containsKey(entry.getKey())) {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
    }
    // Then apply the request headers themselves; null values become "".
    for (Entry<String, String> entry : headers.entrySet()) {
        if (entry.getValue() == null) {
            httpHeaders.set(entry.getKey(), "");
        } else {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
    }
    return httpHeaders;
}
// Resolves the absolute HTTPS URI for the request: the endpoint override when present,
// otherwise an endpoint chosen by the GlobalEndpointManager, combined with the resource path.
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
URI rootUri = request.getEndpointOverride();
if (rootUri == null) {
if (request.getIsMedia()) {
// NOTE(review): get(0) assumes at least one write endpoint is always available
// for media requests — confirm this list cannot be empty here.
rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
} else {
rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
}
}
String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
// Database-account reads target the service root, not a resource path.
path = StringUtils.EMPTY;
}
return new URI("https",
null,
rootUri.getHost(),
rootUri.getPort(),
ensureSlashPrefixed(path),
null,
null);
}
// Returns the path guaranteed to begin with '/'; null passes through unchanged.
private String ensureSlashPrefixed(String path) {
    if (path == null || path.startsWith("/")) {
        return path;
    }
    return "/" + path;
}
/**
 * Transforms reactor-netty's client response Observable into an RxDocumentServiceResponse Observable.
 *
 * Once the customer code subscribes to the observable returned by the CRUD APIs,
 * the subscription propagates up to the source reactor-netty observable, at which point the HTTP invocation is made.
 *
 * @param httpResponseMono
 * @param request
 * @param httpRequest
 * @return {@link Mono}
 */
// Converts the raw HTTP response into an RxDocumentServiceResponse: reads the body,
// validates the status (throwing CosmosException on gateway errors), records timelines,
// fault-injection results, and diagnostics, and maps network failures to CosmosExceptions
// with the appropriate timeout/unavailable sub-status codes.
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
RxDocumentServiceRequest request,
HttpRequest httpRequest) {
return httpResponseMono.flatMap(httpResponse -> {
HttpHeaders httpResponseHeaders = httpResponse.headers();
int httpResponseStatus = httpResponse.statusCode();
// An absent body is normalized to an empty byte array so downstream code never sees empty Mono.
Mono<byte[]> contentObservable = httpResponse
.bodyAsByteArray()
.switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
return contentObservable
.map(content -> {
// Mark completion time on the transport record before validating/converting.
ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
if (reactorNettyRequestRecord != null) {
reactorNettyRequestRecord.setTimeCompleted(Instant.now());
}
// Throws CosmosException for error status codes; success falls through.
validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
StoreResponse rsp = new StoreResponse(httpResponseStatus,
HttpUtils.unescape(httpResponseHeaders.toMap()),
content);
if (reactorNettyRequestRecord != null) {
rsp.setRequestTimeline(reactorNettyRequestRecord.takeTimelineSnapshot());
// Attach any fault-injection rule evaluations keyed by the transport request id.
if (this.gatewayServerErrorInjector != null) {
rsp.setFaultInjectionRuleId(
request
.faultInjectionRequestContext
.getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
rsp.setFaultInjectionRuleEvaluationResults(
request
.faultInjectionRequestContext
.getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, globalEndpointManager);
}
return rsp;
})
.single();
}).map(rsp -> {
// Wrap the store response, carrying over the request timeline when available.
RxDocumentServiceResponse rxDocumentServiceResponse;
if (httpRequest.reactorNettyRequestRecord() != null) {
rxDocumentServiceResponse =
new RxDocumentServiceResponse(this.clientContext, rsp,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
} else {
rxDocumentServiceResponse =
new RxDocumentServiceResponse(this.clientContext, rsp);
}
rxDocumentServiceResponse.setCosmosDiagnostics(request.requestContext.cosmosDiagnostics);
return rxDocumentServiceResponse;
}).onErrorResume(throwable -> {
// Normalize every failure into a CosmosException with diagnostics attached.
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
// Network failures map to 408 (read timeout) or 503 (other), 0 otherwise.
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
// Tag gateway-level sub-status so retry policies can distinguish timeout vs unavailable.
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
if (httpRequest.reactorNettyRequestRecord() != null) {
// Enrich the exception with the transport timeline and fault-injection results.
ReactorNettyRequestRecord reactorNettyRequestRecord = httpRequest.reactorNettyRequestRecord();
BridgeInternal.setRequestTimeline(dce, reactorNettyRequestRecord.takeTimelineSnapshot());
ImplementationBridgeHelpers
.CosmosExceptionHelper
.getCosmosExceptionAccessor()
.setFaultInjectionRuleId(
dce,
request.faultInjectionRequestContext
.getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
ImplementationBridgeHelpers
.CosmosExceptionHelper
.getCosmosExceptionAccessor()
.setFaultInjectionEvaluationResults(
dce,
request.faultInjectionRequestContext
.getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
}
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
}
return Mono.error(dce);
});
}
// Converts any HTTP error status (at or above the gateway error threshold) into a
// CosmosException built from the response body's CosmosError payload; success statuses
// pass through without effect.
private void validateOrThrow(RxDocumentServiceRequest request,
HttpResponseStatus status,
HttpHeaders headers,
byte[] bodyAsBytes) {
int statusCode = status.code();
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
// Reason phrase with spaces removed, e.g. "Not Found" -> "NotFound".
String statusCodeString = status.reasonPhrase() != null
? status.reasonPhrase().replace(" ", "")
: "";
String body = bodyAsBytes != null ? new String(bodyAsBytes, StandardCharsets.UTF_8) : null;
CosmosError cosmosError;
// Parse the error body when present; otherwise start from an empty error.
cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
cosmosError = new CosmosError(statusCodeString,
String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
cosmosError.getPartitionedQueryExecutionInfo());
CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
throw dce;
}
}
/**
 * Dispatches the request to the HTTP-verb handler matching its operation type.
 *
 * @throws IllegalStateException when the operation type has no gateway mapping.
 */
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
    switch (request.getOperationType()) {
        case Create:
        case Batch:
            return this.create(request);
        case Patch:
            return this.patch(request);
        case Upsert:
            return this.upsert(request);
        case Delete:
            // Partition-key deletes go through the POST-based bulk-delete path.
            if (request.getResourceType() == ResourceType.PartitionKey) {
                return this.deleteByPartitionKey(request);
            }
            return this.delete(request);
        case ExecuteJavaScript:
            return this.execute(request);
        case Read:
            return this.read(request);
        case ReadFeed:
            return this.readFeed(request);
        case Replace:
            return this.replace(request);
        case SqlQuery:
        case Query:
        case QueryPlan:
            return this.query(request);
        default:
            // Fixed message: previously read "Unknown operation setType".
            throw new IllegalStateException("Unknown operation type " + request.getOperationType());
    }
}
// Executes the request via invokeAsyncInternal under the metadata-request retry policy,
// which is primed with this request before the first attempt.
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
MetadataRequestRetryPolicy metadataRequestRetryPolicy = new MetadataRequestRetryPolicy(this.globalEndpointManager);
metadataRequestRetryPolicy.onBeforeSendRequest(request);
return BackoffRetryUtility.executeRetry(funcDelegate, metadataRequestRetryPolicy);
}
// Entry point of the store model: applies the session token and intended-collection-rid
// headers, executes the request, then captures session tokens from both success and
// (selected) failure responses and refreshes routing state after partition splits.
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request));
return responseObs.onErrorResume(
e -> {
CosmosException dce = Utils.as(e, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure {}", e.getMessage(), e);
return Mono.error(e);
}
// For non-master resources, certain failures (412, 409, and 404 that is not a
// read-session-unavailable) still carry a valid session token — capture it.
if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
(dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
(
dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
!Exceptions.isSubStatusCode(dce,
HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
this.captureSessionToken(request, dce.getResponseHeaders());
}
// Throughput-control throttles are still recorded in the diagnostics.
if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
}
}
return Mono.error(dce);
}
).flatMap(response ->
this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response))
);
}
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
this.throughputControlStore = throughputControlStore;
}
// No connection warm-up to perform in this store model; proactive init is a no-op.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return Flux.empty();
}
// Lazily creates the gateway error injector and registers the provider's server-error injector.
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider, Configs configs) {
if (this.gatewayServerErrorInjector == null) {
this.gatewayServerErrorInjector = new GatewayServerErrorInjector(configs);
}
this.gatewayServerErrorInjector.registerServerErrorInjector(injectorProvider.getServerErrorInjector());
}
// Open-connection bookkeeping is intentionally a no-op here.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
/**
 * Records the session token from the response into the session container — except for a
 * collection delete, where all of that collection's tokens are cleared instead.
 */
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
    boolean isCollectionDelete =
        request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete;

    if (!isCollectionDelete) {
        this.sessionContainer.setSessionToken(request, responseHeaders);
        return;
    }

    // A deleted collection invalidates every session token scoped to it. For name-based
    // requests the resource id comes back in the OWNER_ID response header.
    String resourceId = request.getIsNameBased()
        ? responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID)
        : request.getResourceId();
    this.sessionContainer.clearTokenByResourceId(resourceId);
}
// Captures the session token from the response and, when the response came from a
// different partition key range than the one the request resolved to (a split/merge
// indicator), refreshes the partition key range cache for the collection.
private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request,
Map<String, String> responseHeaders) {
this.captureSessionToken(request, responseHeaders);
if (request.requestContext.resolvedPartitionKeyRange != null &&
StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) &&
StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) &&
!responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) {
return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid)
.flatMap(collectionRoutingMapValueHolder -> Mono.empty());
}
return Mono.empty();
}
// Applies the session token first, then stamps the intended collection rid, in that order.
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) {
return applySessionToken(request).then(addIntendedCollectionRid(request));
}
// For document requests, sets the intended-collection-rid header from the resolved
// collection unless the caller already supplied one (recorded via the flag so later
// stages know the rid was provided by the SDK consumer).
private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) {
if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> {
if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) {
request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER,
request.requestContext.resolvedCollectionRid);
} else {
request.intendedCollectionRidPassedIntoSDK = true;
}
return Mono.empty();
});
}
return Mono.empty();
}
// Decides whether the outgoing request should carry a session token and, when it should,
// resolves the most specific token available (partition-local when the partition key
// range can be determined, global otherwise). Master operations never carry a token;
// non-session-consistency requests and most writes have any caller-set token removed.
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) {
Map<String, String> headers = request.getHeaders();
Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
// Master (metadata) operations must not carry session tokens — strip any present.
if (isMasterOperation(request.getResourceType(), request.getOperationType())) {
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader,
request) == ConsistencyLevel.SESSION;
// A caller-provided token is kept only for session consistency on reads/batch,
// or on writes when multiple write locations are enabled.
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
// Same condition for SDK-resolved tokens: nothing to attach outside these cases.
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
return Mono.empty();
}
if (this.collectionCache != null && this.partitionKeyRangeCache != null) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).
flatMap(collectionValueHolder -> {
// Without collection metadata, fall back to the global session token.
if (collectionValueHolder == null || collectionValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collectionValueHolder.v.getResourceId(),
null,
null).flatMap(collectionRoutingMapValueHolder -> {
// Without a routing map, likewise fall back to the global session token.
if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
String partitionKeyRangeId =
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal();
if (StringUtils.isNotEmpty(partitionKeyRangeId)) {
// Explicit partition key range id: set a partition-local token for it.
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId);
request.requestContext.resolvedPartitionKeyRange = range;
if (request.requestContext.resolvedPartitionKeyRange == null) {
SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId,
sessionContainer);
} else {
SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
}
} else if (partitionKeyInternal != null) {
// Partition key value: map it to its range via the effective partition key.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
partitionKeyInternal,
collectionValueHolder.v.getPartitionKey());
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
request.requestContext.resolvedPartitionKeyRange = range;
SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
} else {
// No partition information on the request: use the global token.
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
}
return Mono.empty();
});
});
} else {
// Caches unavailable: best effort with the global session token.
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
}
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
isStoredProcedureMasterOperation(resourceType, operationType) ||
operationType == OperationType.QueryPlan;
}
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
}
} | class RxGatewayStoreModel implements RxStoreModel {
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
private ThroughputControlStore throughputControlStore;
private boolean useMultipleWriteLocations;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private GatewayServiceConfigurationReader gatewayServiceConfigurationReader;
private RxClientCollectionCache collectionCache;
private GatewayServerErrorInjector gatewayServerErrorInjector;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
this.defaultHeaders.put(
HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
if (apiType != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) {
this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader;
}
public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) {
this.partitionKeyRangeCache = partitionKeyRangeCache;
}
public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) {
this.useMultipleWriteLocations = useMultipleWriteLocations;
}
boolean isUseMultipleWriteLocations() {
return useMultipleWriteLocations;
}
RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() {
return gatewayServiceConfigurationReader;
}
RxClientCollectionCache getCollectionCache() {
return collectionCache;
}
public void setCollectionCache(RxClientCollectionCache collectionCache) {
this.collectionCache = collectionCache;
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
if (request.getOperationType() != OperationType.QueryPlan) {
request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
}
switch (this.queryCompatibilityMode) {
case SqlQuery:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.SQL);
break;
case Default:
case Query:
default:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.QUERY_JSON);
break;
}
return this.performRequest(request, HttpMethod.POST);
}
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
try {
if (request.requestContext.cosmosDiagnostics == null) {
request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
}
URI uri = getUri(request);
request.requestContext.resourcePhysicalAddress = uri.toString();
if (this.throughputControlStore != null) {
return this.throughputControlStore.processRequest(request, Mono.defer(() -> this.performRequestInternal(request, method, uri)));
}
return this.performRequestInternal(request, method, uri);
} catch (Exception e) {
return Mono.error(e);
}
}
/**
* Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
*
* @param request
* @param method
* @param requestUri
* @return Flux<RxDocumentServiceResponse>
*/
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
if (!headers.containsKey(entry.getKey())) {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
if (headers != null) {
for (Entry<String, String> entry : headers.entrySet()) {
if (entry.getValue() == null) {
httpHeaders.set(entry.getKey(), "");
} else {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
}
return httpHeaders;
}
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
URI rootUri = request.getEndpointOverride();
if (rootUri == null) {
if (request.getIsMedia()) {
rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
} else {
rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
}
}
String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
path = StringUtils.EMPTY;
}
return new URI("https",
null,
rootUri.getHost(),
rootUri.getPort(),
ensureSlashPrefixed(path),
null,
null);
}
private String ensureSlashPrefixed(String path) {
if (path == null) {
return null;
}
if (path.startsWith("/")) {
return path;
}
return "/" + path;
}
/**
* Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
*
*
* Once the customer code subscribes to the observable returned by the CRUD APIs,
* the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made.
*
* @param httpResponseMono
* @param request
* @return {@link Mono}
*/
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
RxDocumentServiceRequest request,
HttpRequest httpRequest) {
return httpResponseMono.flatMap(httpResponse -> {
HttpHeaders httpResponseHeaders = httpResponse.headers();
int httpResponseStatus = httpResponse.statusCode();
Mono<byte[]> contentObservable = httpResponse
.bodyAsByteArray()
.switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
return contentObservable
.map(content -> {
ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
if (reactorNettyRequestRecord != null) {
reactorNettyRequestRecord.setTimeCompleted(Instant.now());
}
validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
StoreResponse rsp = new StoreResponse(httpResponseStatus,
HttpUtils.unescape(httpResponseHeaders.toMap()),
content);
if (reactorNettyRequestRecord != null) {
rsp.setRequestTimeline(reactorNettyRequestRecord.takeTimelineSnapshot());
if (this.gatewayServerErrorInjector != null) {
rsp.setFaultInjectionRuleId(
request
.faultInjectionRequestContext
.getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
rsp.setFaultInjectionRuleEvaluationResults(
request
.faultInjectionRequestContext
.getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
}
}
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, globalEndpointManager);
}
return rsp;
})
.single();
}).map(rsp -> {
RxDocumentServiceResponse rxDocumentServiceResponse;
if (httpRequest.reactorNettyRequestRecord() != null) {
rxDocumentServiceResponse =
new RxDocumentServiceResponse(this.clientContext, rsp,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
} else {
rxDocumentServiceResponse =
new RxDocumentServiceResponse(this.clientContext, rsp);
}
rxDocumentServiceResponse.setCosmosDiagnostics(request.requestContext.cosmosDiagnostics);
return rxDocumentServiceResponse;
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
int statusCode = 0;
if (WebExceptionUtility.isNetworkFailure(exception)) {
if (WebExceptionUtility.isReadTimeoutException(exception)) {
statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
} else {
statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
}
}
dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
if (httpRequest.reactorNettyRequestRecord() != null) {
ReactorNettyRequestRecord reactorNettyRequestRecord = httpRequest.reactorNettyRequestRecord();
BridgeInternal.setRequestTimeline(dce, reactorNettyRequestRecord.takeTimelineSnapshot());
ImplementationBridgeHelpers
.CosmosExceptionHelper
.getCosmosExceptionAccessor()
.setFaultInjectionRuleId(
dce,
request.faultInjectionRequestContext
.getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
ImplementationBridgeHelpers
.CosmosExceptionHelper
.getCosmosExceptionAccessor()
.setFaultInjectionEvaluationResults(
dce,
request.faultInjectionRequestContext
.getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
}
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
}
return Mono.error(dce);
});
}
private void validateOrThrow(RxDocumentServiceRequest request,
HttpResponseStatus status,
HttpHeaders headers,
byte[] bodyAsBytes) {
int statusCode = status.code();
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
String statusCodeString = status.reasonPhrase() != null
? status.reasonPhrase().replace(" ", "")
: "";
String body = bodyAsBytes != null ? new String(bodyAsBytes, StandardCharsets.UTF_8) : null;
CosmosError cosmosError;
cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
cosmosError = new CosmosError(statusCodeString,
String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
cosmosError.getPartitionedQueryExecutionInfo());
CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
throw dce;
}
}
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
switch (request.getOperationType()) {
case Create:
case Batch:
return this.create(request);
case Patch:
return this.patch(request);
case Upsert:
return this.upsert(request);
case Delete:
if (request.getResourceType() == ResourceType.PartitionKey) {
return this.deleteByPartitionKey(request);
}
return this.delete(request);
case ExecuteJavaScript:
return this.execute(request);
case Read:
return this.read(request);
case ReadFeed:
return this.readFeed(request);
case Replace:
return this.replace(request);
case SqlQuery:
case Query:
case QueryPlan:
return this.query(request);
default:
throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
}
}
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
MetadataRequestRetryPolicy metadataRequestRetryPolicy = new MetadataRequestRetryPolicy(this.globalEndpointManager);
metadataRequestRetryPolicy.onBeforeSendRequest(request);
return BackoffRetryUtility.executeRetry(funcDelegate, metadataRequestRetryPolicy);
}
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request));
return responseObs.onErrorResume(
e -> {
CosmosException dce = Utils.as(e, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure {}", e.getMessage(), e);
return Mono.error(e);
}
if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
(dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
(
dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
!Exceptions.isSubStatusCode(dce,
HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
this.captureSessionToken(request, dce.getResponseHeaders());
}
if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
}
}
return Mono.error(dce);
}
).flatMap(response ->
this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response))
);
}
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
this.throughputControlStore = throughputControlStore;
}
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return Flux.empty();
}
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider, Configs configs) {
if (this.gatewayServerErrorInjector == null) {
this.gatewayServerErrorInjector = new GatewayServerErrorInjector(configs);
}
this.gatewayServerErrorInjector.registerServerErrorInjector(injectorProvider.getServerErrorInjector());
}
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
if (request.getResourceType() == ResourceType.DocumentCollection &&
request.getOperationType() == OperationType.Delete) {
String resourceId;
if (request.getIsNameBased()) {
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
} else {
resourceId = request.getResourceId();
}
this.sessionContainer.clearTokenByResourceId(resourceId);
} else {
this.sessionContainer.setSessionToken(request, responseHeaders);
}
}
private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request,
Map<String, String> responseHeaders) {
this.captureSessionToken(request, responseHeaders);
if (request.requestContext.resolvedPartitionKeyRange != null &&
StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) &&
StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) &&
!responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) {
return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid)
.flatMap(collectionRoutingMapValueHolder -> Mono.empty());
}
return Mono.empty();
}
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) {
return applySessionToken(request).then(addIntendedCollectionRid(request));
}
private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) {
if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> {
if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) {
request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER,
request.requestContext.resolvedCollectionRid);
} else {
request.intendedCollectionRidPassedIntoSDK = true;
}
return Mono.empty();
});
}
return Mono.empty();
}
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) {
Map<String, String> headers = request.getHeaders();
Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
if (isMasterOperation(request.getResourceType(), request.getOperationType())) {
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader,
request) == ConsistencyLevel.SESSION;
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
return Mono.empty();
}
if (this.collectionCache != null && this.partitionKeyRangeCache != null) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).
flatMap(collectionValueHolder -> {
if (collectionValueHolder == null || collectionValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collectionValueHolder.v.getResourceId(),
null,
null).flatMap(collectionRoutingMapValueHolder -> {
if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
String partitionKeyRangeId =
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal();
if (StringUtils.isNotEmpty(partitionKeyRangeId)) {
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId);
request.requestContext.resolvedPartitionKeyRange = range;
if (request.requestContext.resolvedPartitionKeyRange == null) {
SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId,
sessionContainer);
} else {
SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
}
} else if (partitionKeyInternal != null) {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
partitionKeyInternal,
collectionValueHolder.v.getPartitionKey());
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
request.requestContext.resolvedPartitionKeyRange = range;
SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
} else {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
}
return Mono.empty();
});
});
} else {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
}
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
isStoredProcedureMasterOperation(resourceType, operationType) ||
operationType == OperationType.QueryPlan;
}
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
}
} |
Fixed it. | public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
if (request.getResponseTimeout() == null) {
request.setResponseTimeout(HttpTimeoutPolicy.getTimeoutPolicy(request).
getTimeoutAndDelaysList().get(0).getResponseTimeout());
}
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, request.getResponseTimeout());
if (this.gatewayServerErrorInjector != null) {
httpResponseMono = this.gatewayServerErrorInjector.injectGatewayErrors(request.getResponseTimeout(),
httpRequest, request, httpResponseMono);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
}
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
return Mono.error(e);
}
} | getTimeoutAndDelaysList().get(0).getResponseTimeout()); | public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, request.getResponseTimeout());
if (this.gatewayServerErrorInjector != null) {
httpResponseMono = this.gatewayServerErrorInjector.injectGatewayErrors(request.getResponseTimeout(),
httpRequest, request, httpResponseMono);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
}
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
return Mono.error(e);
}
} | class RxGatewayStoreModel implements RxStoreModel {
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
private ThroughputControlStore throughputControlStore;
private boolean useMultipleWriteLocations;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private GatewayServiceConfigurationReader gatewayServiceConfigurationReader;
private RxClientCollectionCache collectionCache;
private GatewayServerErrorInjector gatewayServerErrorInjector;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
this.defaultHeaders.put(
HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
if (apiType != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) {
this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader;
}
public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) {
this.partitionKeyRangeCache = partitionKeyRangeCache;
}
public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) {
this.useMultipleWriteLocations = useMultipleWriteLocations;
}
boolean isUseMultipleWriteLocations() {
return useMultipleWriteLocations;
}
RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() {
return gatewayServiceConfigurationReader;
}
RxClientCollectionCache getCollectionCache() {
return collectionCache;
}
public void setCollectionCache(RxClientCollectionCache collectionCache) {
this.collectionCache = collectionCache;
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
if (request.getOperationType() != OperationType.QueryPlan) {
request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
}
switch (this.queryCompatibilityMode) {
case SqlQuery:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.SQL);
break;
case Default:
case Query:
default:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.QUERY_JSON);
break;
}
return this.performRequest(request, HttpMethod.POST);
}
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
try {
if (request.requestContext.cosmosDiagnostics == null) {
request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
}
URI uri = getUri(request);
request.requestContext.resourcePhysicalAddress = uri.toString();
if (this.throughputControlStore != null) {
return this.throughputControlStore.processRequest(request, Mono.defer(() -> this.performRequestInternal(request, method, uri)));
}
return this.performRequestInternal(request, method, uri);
} catch (Exception e) {
return Mono.error(e);
}
}
/**
* Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
*
* @param request
* @param method
* @param requestUri
* @return Flux<RxDocumentServiceResponse>
*/
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
if (!headers.containsKey(entry.getKey())) {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
if (headers != null) {
for (Entry<String, String> entry : headers.entrySet()) {
if (entry.getValue() == null) {
httpHeaders.set(entry.getKey(), "");
} else {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
}
return httpHeaders;
}
/**
 * Computes the absolute HTTPS URI for the request: the endpoint override if
 * set, otherwise the endpoint resolved by the global endpoint manager (media
 * requests always use the first write endpoint), plus the resource path.
 *
 * @throws URISyntaxException if the assembled URI is malformed
 */
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
    URI rootUri = request.getEndpointOverride();
    if (rootUri == null) {
        if (request.getIsMedia()) {
            // Media requests go to the primary write endpoint.
            rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
        } else {
            rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
        }
    }
    String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
    if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
        // Database-account requests target the service root.
        path = StringUtils.EMPTY;
    }
    return new URI("https",
        null,
        rootUri.getHost(),
        rootUri.getPort(),
        ensureSlashPrefixed(path),
        null,
        null);
}
// Returns the path guaranteed to start with '/'; null passes through unchanged.
private String ensureSlashPrefixed(String path) {
    if (path == null || path.startsWith("/")) {
        return path;
    }
    return "/" + path;
}
/**
* Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
*
*
* Once the customer code subscribes to the observable returned by the CRUD APIs,
* the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made.
*
* @param httpResponseMono
* @param request
* @return {@link Mono}
*/
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                  RxDocumentServiceRequest request,
                                                                  HttpRequest httpRequest) {
    return httpResponseMono.flatMap(httpResponse -> {
        HttpHeaders httpResponseHeaders = httpResponse.headers();
        int httpResponseStatus = httpResponse.statusCode();
        // Normalize an empty body to a zero-length byte array so downstream
        // mapping always receives exactly one element.
        Mono<byte[]> contentObservable = httpResponse
            .bodyAsByteArray()
            .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
        return contentObservable
            .map(content -> {
                ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                if (reactorNettyRequestRecord != null) {
                    reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                }
                // Throws CosmosException for gateway error status codes.
                validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                StoreResponse rsp = new StoreResponse(httpResponseStatus,
                    HttpUtils.unescape(httpResponseHeaders.toMap()),
                    content);
                if (reactorNettyRequestRecord != null) {
                    rsp.setRequestTimeline(reactorNettyRequestRecord.takeTimelineSnapshot());
                    // Fault-injection bookkeeping only when an injector is configured.
                    if (this.gatewayServerErrorInjector != null) {
                        rsp.setFaultInjectionRuleId(
                            request
                                .faultInjectionRequestContext
                                .getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
                        rsp.setFaultInjectionRuleEvaluationResults(
                            request
                                .faultInjectionRequestContext
                                .getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
                    }
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, globalEndpointManager);
                }
                return rsp;
            })
            .single();
    }).map(rsp -> {
        RxDocumentServiceResponse rxDocumentServiceResponse;
        if (httpRequest.reactorNettyRequestRecord() != null) {
            rxDocumentServiceResponse =
                new RxDocumentServiceResponse(this.clientContext, rsp,
                    httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
        } else {
            rxDocumentServiceResponse =
                new RxDocumentServiceResponse(this.clientContext, rsp);
        }
        rxDocumentServiceResponse.setCosmosDiagnostics(request.requestContext.cosmosDiagnostics);
        return rxDocumentServiceResponse;
    }).onErrorResume(throwable -> {
        // Map any transport/validation failure to a CosmosException with
        // status / sub-status codes and diagnostics attached.
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (!(unwrappedException instanceof Exception)) {
            // Non-Exception Throwables (Errors) are propagated untouched.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            logger.error("Network failure", exception);
            int statusCode = 0;
            if (WebExceptionUtility.isNetworkFailure(exception)) {
                // Read timeouts -> REQUEST_TIMEOUT; other network failures -> SERVICE_UNAVAILABLE.
                if (WebExceptionUtility.isReadTimeoutException(exception)) {
                    statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                } else {
                    statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                }
            }
            dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            if (httpRequest.reactorNettyRequestRecord() != null) {
                ReactorNettyRequestRecord reactorNettyRequestRecord = httpRequest.reactorNettyRequestRecord();
                BridgeInternal.setRequestTimeline(dce, reactorNettyRequestRecord.takeTimelineSnapshot());
                ImplementationBridgeHelpers
                    .CosmosExceptionHelper
                    .getCosmosExceptionAccessor()
                    .setFaultInjectionRuleId(
                        dce,
                        request.faultInjectionRequestContext
                            .getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
                ImplementationBridgeHelpers
                    .CosmosExceptionHelper
                    .getCosmosExceptionAccessor()
                    .setFaultInjectionEvaluationResults(
                        dce,
                        request.faultInjectionRequestContext
                            .getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
            }
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
        }
        return Mono.error(dce);
    });
}
/**
 * Throws a CosmosException when the gateway returned an error status code
 * (>= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY); otherwise returns normally.
 */
private void validateOrThrow(RxDocumentServiceRequest request,
                             HttpResponseStatus status,
                             HttpHeaders headers,
                             byte[] bodyAsBytes) {
    int statusCode = status.code();
    if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
        String statusCodeString = status.reasonPhrase() != null
            ? status.reasonPhrase().replace(" ", "")
            : "";
        String body = bodyAsBytes != null ? new String(bodyAsBytes, StandardCharsets.UTF_8) : null;
        CosmosError cosmosError;
        // Parse the error from the body when present, then re-wrap it so the
        // message always includes the status code.
        cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
        cosmosError = new CosmosError(statusCodeString,
            String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
            cosmosError.getPartitionedQueryExecutionInfo());
        CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
        BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        throw dce;
    }
}
// Dispatches the request to the verb-specific helper based on its operation type.
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
    switch (request.getOperationType()) {
        case Create:
        case Batch:
            return this.create(request);
        case Patch:
            return this.patch(request);
        case Upsert:
            return this.upsert(request);
        case Delete:
            // Partition-key delete is a POST-based bulk operation, not a plain DELETE.
            if (request.getResourceType() == ResourceType.PartitionKey) {
                return this.deleteByPartitionKey(request);
            }
            return this.delete(request);
        case ExecuteJavaScript:
            return this.execute(request);
        case Read:
            return this.read(request);
        case ReadFeed:
            return this.readFeed(request);
        case Replace:
            return this.replace(request);
        case SqlQuery:
        case Query:
        case QueryPlan:
            return this.query(request);
        default:
            // NOTE(review): "setType" in the message looks like a typo for "type";
            // kept as-is since it is a runtime string.
            throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
    }
}
// Wraps the raw dispatch in the metadata retry policy so transient endpoint
// failures are retried with backoff; the policy is primed with the request
// before the first attempt.
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
    MetadataRequestRetryPolicy retryPolicy = new MetadataRequestRetryPolicy(this.globalEndpointManager);
    retryPolicy.onBeforeSendRequest(request);
    Callable<Mono<RxDocumentServiceResponse>> singleShot = () -> invokeAsyncInternal(request).single();
    return BackoffRetryUtility.executeRetry(singleShot, retryPolicy);
}
/**
 * {@inheritDoc}
 *
 * Applies the session token and intended-collection-rid headers, dispatches
 * the request, and on success captures the session token (triggering a
 * partition key range cache refresh when a split is detected).
 */
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
    Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request));
    return responseObs.onErrorResume(
        e -> {
            CosmosException dce = Utils.as(e, CosmosException.class);
            if (dce == null) {
                logger.error("unexpected failure {}", e.getMessage(), e);
                return Mono.error(e);
            }
            // Capture the session token from error responses for non-master
            // resources on 412/409, and on 404 unless the sub-status is
            // READ_SESSION_NOT_AVAILABLE.
            if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                    dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                    (
                        dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                this.captureSessionToken(request, dce.getResponseHeaders());
            }
            if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
                // Record throughput-control throttling in the diagnostics.
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
                }
            }
            return Mono.error(dce);
        }
    ).flatMap(response ->
        this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response))
    );
}
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
    this.throughputControlStore = throughputControlStore;
}
// Gateway mode performs no proactive connection warm-up.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return Flux.empty();
}
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider, Configs configs) {
    // The injector is created lazily on first configuration; subsequent calls
    // only register additional server-error injectors.
    if (this.gatewayServerErrorInjector == null) {
        this.gatewayServerErrorInjector = new GatewayServerErrorInjector(configs);
    }
    this.gatewayServerErrorInjector.registerServerErrorInjector(injectorProvider.getServerErrorInjector());
}
// Intentional no-op in gateway mode.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
// Intentional no-op in gateway mode.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
/**
 * Stores (or clears) the session token from the response headers. Deleting a
 * collection clears all tokens cached for that collection's resource id.
 */
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
    if (request.getResourceType() == ResourceType.DocumentCollection &&
        request.getOperationType() == OperationType.Delete) {
        String resourceId;
        if (request.getIsNameBased()) {
            // Name-based requests carry the owning resource id in the response.
            resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
        } else {
            resourceId = request.getResourceId();
        }
        this.sessionContainer.clearTokenByResourceId(resourceId);
    } else {
        this.sessionContainer.setSessionToken(request, responseHeaders);
    }
}
/**
 * Records the session token from the response and, when the response came
 * back from a different partition key range than the one the request was
 * resolved to (i.e. a split/merge happened), refreshes the partition key
 * range cache for the collection.
 */
private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request,
                                                              Map<String, String> responseHeaders) {
    this.captureSessionToken(request, responseHeaders);
    // Hoisted: the original looked this header up twice.
    String responsePkRangeId = responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
    if (request.requestContext.resolvedPartitionKeyRange != null &&
        StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) &&
        StringUtils.isNotEmpty(responsePkRangeId) &&
        !responsePkRangeId.equals(request.requestContext.resolvedPartitionKeyRange.getId())) {
        return this.partitionKeyRangeCache.refreshAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                request.requestContext.resolvedCollectionRid)
            .flatMap(collectionRoutingMapValueHolder -> Mono.empty());
    }
    return Mono.empty();
}
// Applies the session token header first, then the intended-collection-rid header.
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) {
    return applySessionToken(request).then(addIntendedCollectionRid(request));
}
/**
 * For document operations, resolves the collection and stamps the intended
 * collection rid header when the caller did not already provide one; when the
 * caller provided it, that fact is recorded on the request.
 */
private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) {
    if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) {
        return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> {
            if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) {
                request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER,
                    request.requestContext.resolvedCollectionRid);
            } else {
                request.intendedCollectionRidPassedIntoSDK = true;
            }
            return Mono.empty();
        });
    }
    return Mono.empty();
}
/**
 * Populates the session token header for the request when session consistency
 * applies. Master operations never carry a session token; for other
 * operations the token is scoped to the resolved partition key range when the
 * routing information is available, otherwise the global session token is used.
 */
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    if (isMasterOperation(request.getResourceType(), request.getOperationType())) {
        // Master (metadata) operations must not carry a session token.
        if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return Mono.empty();
    }
    boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader,
        request) == ConsistencyLevel.SESSION;
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller supplied a token; strip it when session consistency does not
        // apply (or for a non-batch write without multi-write locations).
        if (!sessionConsistency ||
            (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return Mono.empty();
    }
    if (!sessionConsistency ||
        (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
        // No session token needed for this request.
        return Mono.empty();
    }
    if (this.collectionCache != null && this.partitionKeyRangeCache != null) {
        return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).
            flatMap(collectionValueHolder -> {
                if (collectionValueHolder == null || collectionValueHolder.v == null) {
                    // Collection could not be resolved; fall back to the global token.
                    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
                    if (!Strings.isNullOrEmpty(sessionToken)) {
                        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
                    }
                    return Mono.empty();
                }
                return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collectionValueHolder.v.getResourceId(),
                    null,
                    null).flatMap(collectionRoutingMapValueHolder -> {
                    if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) {
                        // Routing map unavailable; fall back to the global token.
                        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
                        if (!Strings.isNullOrEmpty(sessionToken)) {
                            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
                        }
                        return Mono.empty();
                    }
                    String partitionKeyRangeId =
                        request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
                    PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal();
                    if (StringUtils.isNotEmpty(partitionKeyRangeId)) {
                        // Request targets an explicit partition key range id.
                        PartitionKeyRange range =
                            collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId);
                        request.requestContext.resolvedPartitionKeyRange = range;
                        if (request.requestContext.resolvedPartitionKeyRange == null) {
                            SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId,
                                sessionContainer);
                        } else {
                            SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
                        }
                    } else if (partitionKeyInternal != null) {
                        // Resolve the range from the effective partition key.
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                partitionKeyInternal,
                                collectionValueHolder.v.getPartitionKey());
                        PartitionKeyRange range =
                            collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        request.requestContext.resolvedPartitionKeyRange = range;
                        SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
                    } else {
                        // No partition routing info on the request; use the global token.
                        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
                        if (!Strings.isNullOrEmpty(sessionToken)) {
                            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
                        }
                    }
                    return Mono.empty();
                });
            });
    } else {
        // Caches not available; fall back to the global session token.
        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
        if (!Strings.isNullOrEmpty(sessionToken)) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
        }
        return Mono.empty();
    }
}
// True when the operation targets a master (metadata) resource, is a
// stored-procedure metadata operation, or is a query-plan request.
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (operationType == OperationType.QueryPlan) {
        return true;
    }
    return ReplicatedResourceClientUtils.isMasterResource(resourceType)
        || isStoredProcedureMasterOperation(resourceType, operationType);
}
// Every stored-procedure operation except actually executing the script is a
// master (metadata) operation.
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (resourceType != ResourceType.StoredProcedure) {
        return false;
    }
    return operationType != OperationType.ExecuteJavaScript;
}
} | class RxGatewayStoreModel implements RxStoreModel {
// Shared sentinel for empty response bodies.
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
// Headers applied to every request unless overridden per request.
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
// Optional; set via enableThroughputControl.
private ThroughputControlStore throughputControlStore;
private boolean useMultipleWriteLocations;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private GatewayServiceConfigurationReader gatewayServiceConfigurationReader;
private RxClientCollectionCache collectionCache;
// Optional; created by configureFaultInjectorProvider.
private GatewayServerErrorInjector gatewayServerErrorInjector;
/**
 * Creates a gateway store model and seeds the default request headers
 * (cache-control, API version, SDK capabilities, user agent, and the optional
 * API type and default consistency level).
 */
public RxGatewayStoreModel(
        DiagnosticsClientContext clientContext,
        ISessionContainer sessionContainer,
        ConsistencyLevel defaultConsistencyLevel,
        QueryCompatibilityMode queryCompatibilityMode,
        UserAgentContainer userAgentContainer,
        GlobalEndpointManager globalEndpointManager,
        HttpClient httpClient,
        ApiType apiType) {
    this.clientContext = clientContext;
    this.defaultHeaders = new HashMap<>();
    this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
        "no-cache");
    this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
        HttpConstants.Versions.CURRENT_VERSION);
    this.defaultHeaders.put(
        HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
        HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    if (apiType != null) {
        this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
    }
    // A default user agent container is substituted when none is provided.
    if (userAgentContainer == null) {
        userAgentContainer = new UserAgentContainer();
    }
    this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
    if (defaultConsistencyLevel != null) {
        this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
            defaultConsistencyLevel.toString());
    }
    this.defaultConsistencyLevel = defaultConsistencyLevel;
    this.globalEndpointManager = globalEndpointManager;
    this.queryCompatibilityMode = queryCompatibilityMode;
    this.httpClient = httpClient;
    this.sessionContainer = sessionContainer;
}
// Wiring setters/getters: these collaborators are injected after construction.
void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) {
    this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader;
}
public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) {
    this.partitionKeyRangeCache = partitionKeyRangeCache;
}
public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) {
    this.useMultipleWriteLocations = useMultipleWriteLocations;
}
boolean isUseMultipleWriteLocations() {
    return useMultipleWriteLocations;
}
RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() {
    return gatewayServiceConfigurationReader;
}
RxClientCollectionCache getCollectionCache() {
    return collectionCache;
}
public void setCollectionCache(RxClientCollectionCache collectionCache) {
    this.collectionCache = collectionCache;
}
// Operation helpers: each maps a Cosmos operation to its gateway HTTP verb.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.DELETE);
}
// Partition-key delete is a POST-based bulk operation.
private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return this.performRequest(request, HttpMethod.GET);
}
/**
 * Sends a query (or query-plan) request to the gateway.
 * Everything except a query-plan request is flagged as a query, and the
 * content type is chosen from the configured query compatibility mode.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    Map<String, String> requestHeaders = request.getHeaders();
    // Query-plan requests must not carry the IS_QUERY marker.
    if (request.getOperationType() != OperationType.QueryPlan) {
        requestHeaders.put(HttpConstants.HttpHeaders.IS_QUERY, "true");
    }
    // Only the legacy SqlQuery mode uses the SQL media type; Default, Query
    // and any future mode use the JSON query media type.
    String contentType = this.queryCompatibilityMode == QueryCompatibilityMode.SqlQuery
        ? RuntimeConstants.MediaTypes.SQL
        : RuntimeConstants.MediaTypes.QUERY_JSON;
    requestHeaders.put(HttpConstants.HttpHeaders.CONTENT_TYPE, contentType);
    // Queries are always POSTed.
    return this.performRequest(request, HttpMethod.POST);
}
/**
 * Issues the request against the gateway with the given HTTP method, routing
 * it through the throughput control store when one is configured.
 * Synchronous failures (e.g. URI resolution) are surfaced as an error Mono
 * rather than thrown to the caller.
 */
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
    try {
        // Ensure a diagnostics context exists before any downstream recording.
        if (request.requestContext.cosmosDiagnostics == null) {
            request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
        }
        URI uri = getUri(request);
        request.requestContext.resourcePhysicalAddress = uri.toString();
        if (this.throughputControlStore != null) {
            // Mono.defer so the HTTP call is only made when the store subscribes.
            return this.throughputControlStore.processRequest(request, Mono.defer(() -> this.performRequestInternal(request, method, uri)));
        }
        return this.performRequestInternal(request, method, uri);
    } catch (Exception e) {
        return Mono.error(e);
    }
}
/**
 * Given the request, creates a flux which upon subscription issues the HTTP call and emits one RxDocumentServiceResponse.
 *
 * @param request
 * @param method
 * @param requestUri
 * @return Mono&lt;RxDocumentServiceResponse&gt; emitting a single response
 */
/**
 * Merges the client's default headers with the per-request headers into the
 * HttpHeaders object sent on the wire. Per-request headers win over defaults;
 * a request header with a null value is sent as an empty string.
 *
 * @param headers per-request headers; may be null or empty
 * @return the merged headers
 */
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
    HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
    // Copy defaults first, skipping any key the request overrides.
    // Fix: the original dereferenced 'headers' here before the null check
    // below, so a null headers map would throw NPE; guard the lookup.
    for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
        if (headers == null || !headers.containsKey(entry.getKey())) {
            httpHeaders.set(entry.getKey(), entry.getValue());
        }
    }
    if (headers != null) {
        for (Entry<String, String> entry : headers.entrySet()) {
            // Null values are not representable on the wire; normalize to "".
            if (entry.getValue() == null) {
                httpHeaders.set(entry.getKey(), "");
            } else {
                httpHeaders.set(entry.getKey(), entry.getValue());
            }
        }
    }
    return httpHeaders;
}
/**
 * Computes the absolute HTTPS URI for the request: the endpoint override if
 * set, otherwise the endpoint resolved by the global endpoint manager (media
 * requests always use the first write endpoint), plus the resource path.
 *
 * @throws URISyntaxException if the assembled URI is malformed
 */
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
    URI rootUri = request.getEndpointOverride();
    if (rootUri == null) {
        if (request.getIsMedia()) {
            // Media requests go to the primary write endpoint.
            rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
        } else {
            rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
        }
    }
    String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
    if (request.getResourceType().equals(ResourceType.DatabaseAccount)) {
        // Database-account requests target the service root.
        path = StringUtils.EMPTY;
    }
    return new URI("https",
        null,
        rootUri.getHost(),
        rootUri.getPort(),
        ensureSlashPrefixed(path),
        null,
        null);
}
// Returns the path guaranteed to start with '/'; null passes through unchanged.
private String ensureSlashPrefixed(String path) {
    if (path == null || path.startsWith("/")) {
        return path;
    }
    return "/" + path;
}
/**
* Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
*
*
* Once the customer code subscribes to the observable returned by the CRUD APIs,
* the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made.
*
* @param httpResponseMono
* @param request
* @return {@link Mono}
*/
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
                                                                  RxDocumentServiceRequest request,
                                                                  HttpRequest httpRequest) {
    return httpResponseMono.flatMap(httpResponse -> {
        HttpHeaders httpResponseHeaders = httpResponse.headers();
        int httpResponseStatus = httpResponse.statusCode();
        // Normalize an empty body to a zero-length byte array so downstream
        // mapping always receives exactly one element.
        Mono<byte[]> contentObservable = httpResponse
            .bodyAsByteArray()
            .switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
        return contentObservable
            .map(content -> {
                ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
                if (reactorNettyRequestRecord != null) {
                    reactorNettyRequestRecord.setTimeCompleted(Instant.now());
                }
                // Throws CosmosException for gateway error status codes.
                validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
                StoreResponse rsp = new StoreResponse(httpResponseStatus,
                    HttpUtils.unescape(httpResponseHeaders.toMap()),
                    content);
                if (reactorNettyRequestRecord != null) {
                    rsp.setRequestTimeline(reactorNettyRequestRecord.takeTimelineSnapshot());
                    // Fault-injection bookkeeping only when an injector is configured.
                    if (this.gatewayServerErrorInjector != null) {
                        rsp.setFaultInjectionRuleId(
                            request
                                .faultInjectionRequestContext
                                .getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
                        rsp.setFaultInjectionRuleEvaluationResults(
                            request
                                .faultInjectionRequestContext
                                .getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
                    }
                }
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, globalEndpointManager);
                }
                return rsp;
            })
            .single();
    }).map(rsp -> {
        RxDocumentServiceResponse rxDocumentServiceResponse;
        if (httpRequest.reactorNettyRequestRecord() != null) {
            rxDocumentServiceResponse =
                new RxDocumentServiceResponse(this.clientContext, rsp,
                    httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
        } else {
            rxDocumentServiceResponse =
                new RxDocumentServiceResponse(this.clientContext, rsp);
        }
        rxDocumentServiceResponse.setCosmosDiagnostics(request.requestContext.cosmosDiagnostics);
        return rxDocumentServiceResponse;
    }).onErrorResume(throwable -> {
        // Map any transport/validation failure to a CosmosException with
        // status / sub-status codes and diagnostics attached.
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (!(unwrappedException instanceof Exception)) {
            // Non-Exception Throwables (Errors) are propagated untouched.
            logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
            return Mono.error(unwrappedException);
        }
        Exception exception = (Exception) unwrappedException;
        CosmosException dce;
        if (!(exception instanceof CosmosException)) {
            logger.error("Network failure", exception);
            int statusCode = 0;
            if (WebExceptionUtility.isNetworkFailure(exception)) {
                // Read timeouts -> REQUEST_TIMEOUT; other network failures -> SERVICE_UNAVAILABLE.
                if (WebExceptionUtility.isReadTimeoutException(exception)) {
                    statusCode = HttpConstants.StatusCodes.REQUEST_TIMEOUT;
                } else {
                    statusCode = HttpConstants.StatusCodes.SERVICE_UNAVAILABLE;
                }
            }
            dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, exception);
            BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        } else {
            dce = (CosmosException) exception;
        }
        if (WebExceptionUtility.isNetworkFailure(dce)) {
            if (WebExceptionUtility.isReadTimeoutException(dce)) {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
            } else {
                BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
            }
        }
        if (request.requestContext.cosmosDiagnostics != null) {
            if (httpRequest.reactorNettyRequestRecord() != null) {
                ReactorNettyRequestRecord reactorNettyRequestRecord = httpRequest.reactorNettyRequestRecord();
                BridgeInternal.setRequestTimeline(dce, reactorNettyRequestRecord.takeTimelineSnapshot());
                ImplementationBridgeHelpers
                    .CosmosExceptionHelper
                    .getCosmosExceptionAccessor()
                    .setFaultInjectionRuleId(
                        dce,
                        request.faultInjectionRequestContext
                            .getFaultInjectionRuleId(reactorNettyRequestRecord.getTransportRequestId()));
                ImplementationBridgeHelpers
                    .CosmosExceptionHelper
                    .getCosmosExceptionAccessor()
                    .setFaultInjectionEvaluationResults(
                        dce,
                        request.faultInjectionRequestContext
                            .getFaultInjectionRuleEvaluationResults(reactorNettyRequestRecord.getTransportRequestId()));
            }
            BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
        }
        return Mono.error(dce);
    });
}
/**
 * Throws a CosmosException when the gateway returned an error status code
 * (>= MINIMUM_STATUSCODE_AS_ERROR_GATEWAY); otherwise returns normally.
 */
private void validateOrThrow(RxDocumentServiceRequest request,
                             HttpResponseStatus status,
                             HttpHeaders headers,
                             byte[] bodyAsBytes) {
    int statusCode = status.code();
    if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
        String statusCodeString = status.reasonPhrase() != null
            ? status.reasonPhrase().replace(" ", "")
            : "";
        String body = bodyAsBytes != null ? new String(bodyAsBytes, StandardCharsets.UTF_8) : null;
        CosmosError cosmosError;
        // Parse the error from the body when present, then re-wrap it so the
        // message always includes the status code.
        cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
        cosmosError = new CosmosError(statusCodeString,
            String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
            cosmosError.getPartitionedQueryExecutionInfo());
        CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
        BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        throw dce;
    }
}
// Dispatches the request to the verb-specific helper based on its operation type.
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
    switch (request.getOperationType()) {
        case Create:
        case Batch:
            return this.create(request);
        case Patch:
            return this.patch(request);
        case Upsert:
            return this.upsert(request);
        case Delete:
            // Partition-key delete is a POST-based bulk operation, not a plain DELETE.
            if (request.getResourceType() == ResourceType.PartitionKey) {
                return this.deleteByPartitionKey(request);
            }
            return this.delete(request);
        case ExecuteJavaScript:
            return this.execute(request);
        case Read:
            return this.read(request);
        case ReadFeed:
            return this.readFeed(request);
        case Replace:
            return this.replace(request);
        case SqlQuery:
        case Query:
        case QueryPlan:
            return this.query(request);
        default:
            // NOTE(review): "setType" in the message looks like a typo for "type";
            // kept as-is since it is a runtime string.
            throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
    }
}
// Wraps the raw dispatch in the metadata retry policy so transient endpoint
// failures are retried with backoff; the policy is primed with the request
// before the first attempt.
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
    MetadataRequestRetryPolicy retryPolicy = new MetadataRequestRetryPolicy(this.globalEndpointManager);
    retryPolicy.onBeforeSendRequest(request);
    Callable<Mono<RxDocumentServiceResponse>> singleShot = () -> invokeAsyncInternal(request).single();
    return BackoffRetryUtility.executeRetry(singleShot, retryPolicy);
}
/**
 * {@inheritDoc}
 *
 * Applies the session token and intended-collection-rid headers, dispatches
 * the request, and on success captures the session token (triggering a
 * partition key range cache refresh when a split is detected).
 */
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
    Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request));
    return responseObs.onErrorResume(
        e -> {
            CosmosException dce = Utils.as(e, CosmosException.class);
            if (dce == null) {
                logger.error("unexpected failure {}", e.getMessage(), e);
                return Mono.error(e);
            }
            // Capture the session token from error responses for non-master
            // resources on 412/409, and on 404 unless the sub-status is
            // READ_SESSION_NOT_AVAILABLE.
            if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                    dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                    (
                        dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                this.captureSessionToken(request, dce.getResponseHeaders());
            }
            if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
                // Record throughput-control throttling in the diagnostics.
                if (request.requestContext.cosmosDiagnostics != null) {
                    BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, dce, globalEndpointManager);
                }
            }
            return Mono.error(dce);
        }
    ).flatMap(response ->
        this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response))
    );
}
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
    this.throughputControlStore = throughputControlStore;
}
// Gateway mode performs no proactive connection warm-up.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return Flux.empty();
}
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider, Configs configs) {
    // The injector is created lazily on first configuration; subsequent calls
    // only register additional server-error injectors.
    if (this.gatewayServerErrorInjector == null) {
        this.gatewayServerErrorInjector = new GatewayServerErrorInjector(configs);
    }
    this.gatewayServerErrorInjector.registerServerErrorInjector(injectorProvider.getServerErrorInjector());
}
// Intentional no-op in gateway mode.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
// Intentional no-op in gateway mode.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
}
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
if (request.getResourceType() == ResourceType.DocumentCollection &&
request.getOperationType() == OperationType.Delete) {
String resourceId;
if (request.getIsNameBased()) {
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
} else {
resourceId = request.getResourceId();
}
this.sessionContainer.clearTokenByResourceId(resourceId);
} else {
this.sessionContainer.setSessionToken(request, responseHeaders);
}
}
private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request,
Map<String, String> responseHeaders) {
this.captureSessionToken(request, responseHeaders);
if (request.requestContext.resolvedPartitionKeyRange != null &&
StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) &&
StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) &&
!responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) {
return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid)
.flatMap(collectionRoutingMapValueHolder -> Mono.empty());
}
return Mono.empty();
}
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) {
return applySessionToken(request).then(addIntendedCollectionRid(request));
}
private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) {
if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> {
if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) {
request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER,
request.requestContext.resolvedCollectionRid);
} else {
request.intendedCollectionRidPassedIntoSDK = true;
}
return Mono.empty();
});
}
return Mono.empty();
}
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) {
Map<String, String> headers = request.getHeaders();
Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
if (isMasterOperation(request.getResourceType(), request.getOperationType())) {
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader,
request) == ConsistencyLevel.SESSION;
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
return Mono.empty();
}
if (this.collectionCache != null && this.partitionKeyRangeCache != null) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).
flatMap(collectionValueHolder -> {
if (collectionValueHolder == null || collectionValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collectionValueHolder.v.getResourceId(),
null,
null).flatMap(collectionRoutingMapValueHolder -> {
if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
String partitionKeyRangeId =
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal();
if (StringUtils.isNotEmpty(partitionKeyRangeId)) {
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId);
request.requestContext.resolvedPartitionKeyRange = range;
if (request.requestContext.resolvedPartitionKeyRange == null) {
SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId,
sessionContainer);
} else {
SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
}
} else if (partitionKeyInternal != null) {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
partitionKeyInternal,
collectionValueHolder.v.getPartitionKey());
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
request.requestContext.resolvedPartitionKeyRange = range;
SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
} else {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
}
return Mono.empty();
});
});
} else {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
}
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
isStoredProcedureMasterOperation(resourceType, operationType) ||
operationType == OperationType.QueryPlan;
}
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
}
} |
Added the lower bound check as well. | private void validateDataPlaneRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList = diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
.stream()
.map(ClientSideRequestStatistics::getGatewayStatisticsList)
.flatMap(Collection::stream)
.collect(Collectors.toList());
for (ClientSideRequestStatistics.GatewayStatistics gs : gatewayStatisticsList) {
for (RequestTimeline.Event event : gs.getRequestTimeline()) {
Duration durationInMillis = event.getDuration();
if (durationInMillis != null) {
assertThat(durationInMillis.getSeconds()).isLessThanOrEqualTo(62);
}
}
}
} | assertThat(durationInMillis.getSeconds()).isLessThanOrEqualTo(62); | private void validateDataPlaneRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
List<ClientSideRequestStatistics.GatewayStatistics> gatewayStatisticsList = diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
.stream()
.map(ClientSideRequestStatistics::getGatewayStatisticsList)
.flatMap(Collection::stream)
.collect(Collectors.toList());
for (ClientSideRequestStatistics.GatewayStatistics gs : gatewayStatisticsList) {
if (gs.getStatusCode() == HttpConstants.StatusCodes.REQUEST_TIMEOUT) {
for (RequestTimeline.Event event : gs.getRequestTimeline()) {
Duration durationInMillis = event.getDuration();
if (durationInMillis != null) {
assertThat(durationInMillis.getSeconds()).isLessThanOrEqualTo(62);
assertThat(durationInMillis.getSeconds()).isGreaterThanOrEqualTo(60);
}
}
}
}
} | class WebExceptionRetryPolicyE2ETests extends TestSuiteBase {
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private CosmosAsyncClient cosmosAsyncClient;
private CosmosAsyncContainer cosmosAsyncContainer;
@Factory(dataProvider = "clientBuildersWithSessionConsistency")
public WebExceptionRetryPolicyE2ETests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
this.subscriberValidationTimeout = TIMEOUT;
}
@BeforeClass(groups = {"multi-master"}, timeOut = TIMEOUT)
public void beforeClass() {
this.cosmosAsyncClient = getClientBuilder().buildAsyncClient();
this.cosmosAsyncContainer = getSharedMultiPartitionCosmosContainerWithIdAsPartitionKey(cosmosAsyncClient);
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(cosmosAsyncClient);
}
@DataProvider(name = "operationTypeProvider")
public static Object[][] operationTypeProvider() {
return new Object[][]{
{FaultInjectionOperationType.READ_ITEM, OperationType.Read}
};
}
@Test(groups = {"multi-master"}, timeOut = TIMEOUT)
public void addressRefreshHttpTimeout() {
if (BridgeInternal
.getContextClient(this.cosmosAsyncClient)
.getConnectionPolicy()
.getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("addressRefreshHttpTimeout() is only meant for DIRECT mode");
}
TestItem newItem = TestItem.createNewItem();
this.cosmosAsyncContainer.createItem(newItem).block();
FaultInjectionRule addressRefreshDelayRule = new FaultInjectionRuleBuilder("addressRefreshDelayRule")
.condition(
new FaultInjectionConditionBuilder()
.operationType(FaultInjectionOperationType.METADATA_REQUEST_ADDRESS_REFRESH)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofSeconds(14))
.times(4)
.build()
)
.build();
FaultInjectionRule serverGoneRule = new FaultInjectionRuleBuilder("serverGoneRule")
.condition(
new FaultInjectionConditionBuilder()
.operationType(FaultInjectionOperationType.READ_ITEM)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.GONE)
.times(4)
.build()
)
.build();
CosmosFaultInjectionHelper
.configureFaultInjectionRules(
cosmosAsyncContainer,
Arrays.asList(addressRefreshDelayRule, serverGoneRule)).block();
try {
cosmosAsyncContainer
.readItem(newItem.getId(), new PartitionKey(newItem.getId()), TestItem.class)
.block();
fail("addressRefreshHttpTimeout() should fail due to addressRefresh timeout");
} catch (CosmosException e) {
System.out.println("dataPlaneRequestHttpTimeout() Diagnostics " + " " + e.getDiagnostics());
assertThat(e.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.REQUEST_TIMEOUT);
assertThat(e.getSubStatusCode()).isEqualTo(HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
validateAddressRefreshRetryPolicyResponseTimeouts(e.getDiagnostics());
} finally {
addressRefreshDelayRule.disable();
serverGoneRule.disable();
}
}
@Test(groups = {"multi-master"}, dataProvider = "operationTypeProvider", timeOut = 8 * TIMEOUT)
public void dataPlaneRequestHttpTimeout(
FaultInjectionOperationType faultInjectionOperationType,
OperationType operationType) {
if (BridgeInternal
.getContextClient(this.cosmosAsyncClient)
.getConnectionPolicy()
.getConnectionMode() != ConnectionMode.GATEWAY) {
throw new SkipException("queryPlanHttpTimeoutWillNotMarkRegionUnavailable() is only meant for GATEWAY mode");
}
TestItem newItem = TestItem.createNewItem();
this.cosmosAsyncContainer.createItem(newItem).block();
FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
.condition(
new FaultInjectionConditionBuilder()
.operationType(faultInjectionOperationType)
.connectionType(FaultInjectionConnectionType.GATEWAY)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofSeconds(66))
.times(4)
.build()
)
.build();
CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();
try {
CosmosDiagnostics cosmosDiagnostics =
this.performDocumentOperation(cosmosAsyncContainer, operationType, newItem).block();
System.out.println("dataPlaneRequestHttpTimeout() Diagnostics " + " " + cosmosDiagnostics);
validateDataPlaneRetryPolicyResponseTimeouts(cosmosDiagnostics);
} catch (Exception e) {
fail("dataPlaneRequestHttpTimeout() should succeed for operationType " + operationType, e);
} finally {
requestHttpTimeoutRule.disable();
}
}
private void validateAddressRefreshRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
List<ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsList = diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
.stream()
.map(ClientSideRequestStatistics::getAddressResolutionStatistics)
.flatMap(m -> m.values().stream())
.sorted(Comparator.comparing(ClientSideRequestStatistics.AddressResolutionStatistics::getStartTimeUTC))
.collect(Collectors.toList());
assertThat(MILLIS.between(addressResolutionStatisticsList.get(0).getStartTimeUTC(), addressResolutionStatisticsList.get(0).getEndTimeUTC())).isLessThanOrEqualTo(600);
assertThat(MILLIS.between(addressResolutionStatisticsList.get(1).getStartTimeUTC(), addressResolutionStatisticsList.get(1).getEndTimeUTC())).isLessThanOrEqualTo(600);
assertThat(SECONDS.between(addressResolutionStatisticsList.get(2).getStartTimeUTC(), addressResolutionStatisticsList.get(2).getEndTimeUTC())).isLessThanOrEqualTo(6);
assertThat(SECONDS.between(addressResolutionStatisticsList.get(3).getStartTimeUTC(), addressResolutionStatisticsList.get(3).getEndTimeUTC())).isLessThanOrEqualTo(11);
}
private Mono<CosmosDiagnostics> performDocumentOperation(
CosmosAsyncContainer cosmosAsyncContainer,
OperationType operationType,
TestItem createdItem) {
if (operationType == OperationType.Query) {
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
String query = String.format("SELECT * from c where c.id = '%s'", createdItem.getId());
FeedResponse<TestItem> itemFeedResponse =
cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst();
return Mono.just(itemFeedResponse.getCosmosDiagnostics());
}
if (operationType == OperationType.Read
|| operationType == OperationType.Delete
|| operationType == OperationType.Replace
|| operationType == OperationType.Create
|| operationType == OperationType.Patch
|| operationType == OperationType.Upsert) {
if (operationType == OperationType.Read) {
return cosmosAsyncContainer
.readItem(
createdItem.getId(),
new PartitionKey(createdItem.getId()),
TestItem.class
)
.map(itemResponse -> itemResponse.getDiagnostics());
}
if (operationType == OperationType.Replace) {
return cosmosAsyncContainer
.replaceItem(
createdItem,
createdItem.getId(),
new PartitionKey(createdItem.getId()))
.map(itemResponse -> itemResponse.getDiagnostics());
}
if (operationType == OperationType.Delete) {
return cosmosAsyncContainer.deleteItem(createdItem, null).map(itemResponse -> itemResponse.getDiagnostics());
}
if (operationType == OperationType.Create) {
return cosmosAsyncContainer.createItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
}
if (operationType == OperationType.Upsert) {
return cosmosAsyncContainer.upsertItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
}
if (operationType == OperationType.Patch) {
CosmosPatchOperations patchOperations =
CosmosPatchOperations
.create()
.add("newPath", "newPath");
return cosmosAsyncContainer
.patchItem(createdItem.getId(), new PartitionKey(createdItem.getId()), patchOperations, TestItem.class)
.map(itemResponse -> itemResponse.getDiagnostics());
}
}
if (operationType == OperationType.ReadFeed) {
List<FeedRange> feedRanges = cosmosAsyncContainer.getFeedRanges().block();
CosmosChangeFeedRequestOptions changeFeedRequestOptions =
CosmosChangeFeedRequestOptions.createForProcessingFromBeginning(feedRanges.get(0));
FeedResponse<TestItem> firstPage = cosmosAsyncContainer
.queryChangeFeed(changeFeedRequestOptions, TestItem.class)
.byPage()
.blockFirst();
return Mono.just(firstPage.getCosmosDiagnostics());
}
throw new IllegalArgumentException("The operation type is not supported");
}
} | class WebExceptionRetryPolicyE2ETests extends TestSuiteBase {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicyE2ETests.class);
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private CosmosAsyncClient cosmosAsyncClient;
private CosmosAsyncContainer cosmosAsyncContainer;
@Factory(dataProvider = "clientBuildersWithSessionConsistency")
public WebExceptionRetryPolicyE2ETests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
this.subscriberValidationTimeout = TIMEOUT;
}
@BeforeClass(groups = {"multi-master"}, timeOut = TIMEOUT)
public void beforeClass() {
this.cosmosAsyncClient = getClientBuilder().buildAsyncClient();
this.cosmosAsyncContainer = getSharedMultiPartitionCosmosContainerWithIdAsPartitionKey(cosmosAsyncClient);
}
@AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(cosmosAsyncClient);
}
@DataProvider(name = "operationTypeProvider")
public static Object[][] operationTypeProvider() {
return new Object[][]{
{FaultInjectionOperationType.READ_ITEM, OperationType.Read},
{FaultInjectionOperationType.QUERY_ITEM, OperationType.Query}
};
}
@Test(groups = {"multi-master"}, timeOut = TIMEOUT)
public void addressRefreshHttpTimeout() {
if (BridgeInternal
.getContextClient(this.cosmosAsyncClient)
.getConnectionPolicy()
.getConnectionMode() != ConnectionMode.DIRECT) {
throw new SkipException("addressRefreshHttpTimeout() is only meant for DIRECT mode");
}
TestItem newItem = TestItem.createNewItem();
this.cosmosAsyncContainer.createItem(newItem).block();
FaultInjectionRule addressRefreshDelayRule = new FaultInjectionRuleBuilder("addressRefreshDelayRule")
.condition(
new FaultInjectionConditionBuilder()
.operationType(FaultInjectionOperationType.METADATA_REQUEST_ADDRESS_REFRESH)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofSeconds(14))
.times(4)
.build()
)
.build();
FaultInjectionRule serverGoneRule = new FaultInjectionRuleBuilder("serverGoneRule")
.condition(
new FaultInjectionConditionBuilder()
.operationType(FaultInjectionOperationType.READ_ITEM)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.GONE)
.times(4)
.build()
)
.build();
CosmosFaultInjectionHelper
.configureFaultInjectionRules(
cosmosAsyncContainer,
Arrays.asList(addressRefreshDelayRule, serverGoneRule)).block();
try {
cosmosAsyncContainer
.readItem(newItem.getId(), new PartitionKey(newItem.getId()), TestItem.class)
.block();
fail("addressRefreshHttpTimeout() should fail due to addressRefresh timeout");
} catch (CosmosException e) {
logger.info("dataPlaneRequestHttpTimeout() Diagnostics " + " " + e.getDiagnostics());
assertThat(e.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.REQUEST_TIMEOUT);
assertThat(e.getSubStatusCode()).isEqualTo(HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
validateAddressRefreshRetryPolicyResponseTimeouts(e.getDiagnostics());
} finally {
addressRefreshDelayRule.disable();
serverGoneRule.disable();
}
}
@Test(groups = {"multi-master"}, dataProvider = "operationTypeProvider", timeOut = 8 * TIMEOUT)
public void dataPlaneRequestHttpTimeout(
FaultInjectionOperationType faultInjectionOperationType,
OperationType operationType) {
if (BridgeInternal
.getContextClient(this.cosmosAsyncClient)
.getConnectionPolicy()
.getConnectionMode() != ConnectionMode.GATEWAY) {
throw new SkipException("queryPlanHttpTimeoutWillNotMarkRegionUnavailable() is only meant for GATEWAY mode");
}
TestItem newItem = TestItem.createNewItem();
this.cosmosAsyncContainer.createItem(newItem).block();
FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
.condition(
new FaultInjectionConditionBuilder()
.operationType(faultInjectionOperationType)
.connectionType(FaultInjectionConnectionType.GATEWAY)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofSeconds(66))
.times(3)
.build()
)
.build();
CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();
try {
CosmosDiagnostics cosmosDiagnostics =
this.performDocumentOperation(cosmosAsyncContainer, operationType, newItem).block();
logger.info("dataPlaneRequestHttpTimeout() Diagnostics " + " " + cosmosDiagnostics);
validateDataPlaneRetryPolicyResponseTimeouts(cosmosDiagnostics);
} catch (Exception e) {
fail("dataPlaneRequestHttpTimeout() should succeed for operationType " + operationType, e);
} finally {
requestHttpTimeoutRule.disable();
}
}
@Test(groups = {"multi-master"}, timeOut = 8 * TIMEOUT)
public void writeOperationRequestHttpTimeout() {
if (BridgeInternal
.getContextClient(this.cosmosAsyncClient)
.getConnectionPolicy()
.getConnectionMode() != ConnectionMode.GATEWAY) {
throw new SkipException("queryPlanHttpTimeoutWillNotMarkRegionUnavailable() is only meant for GATEWAY mode");
}
TestItem newItem = TestItem.createNewItem();
this.cosmosAsyncContainer.createItem(newItem).block();
FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
.condition(
new FaultInjectionConditionBuilder()
.operationType(FaultInjectionOperationType.CREATE_ITEM)
.connectionType(FaultInjectionConnectionType.GATEWAY)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.RESPONSE_DELAY)
.delay(Duration.ofSeconds(66))
.times(2)
.build()
)
.build();
CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();
try {
CosmosDiagnostics cosmosDiagnostics =
this.performDocumentOperation(cosmosAsyncContainer, OperationType.Create, newItem).block();
fail("writeOperationRequestHttpTimeout() should fail for operationType " + OperationType.Create);
} catch (CosmosException e) {
logger.info("writeOperationRequestHttpTimeout() Diagnostics " + " " + e.getDiagnostics());
assertThat(e.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.REQUEST_TIMEOUT);
} finally {
requestHttpTimeoutRule.disable();
}
}
@Test(groups = {"multi-master"}, timeOut = 8 * TIMEOUT)
public void writeOperationConnectionTimeout() {
if (BridgeInternal
.getContextClient(this.cosmosAsyncClient)
.getConnectionPolicy()
.getConnectionMode() != ConnectionMode.GATEWAY) {
throw new SkipException("queryPlanHttpTimeoutWillNotMarkRegionUnavailable() is only meant for GATEWAY mode");
}
TestItem newItem = TestItem.createNewItem();
this.cosmosAsyncContainer.createItem(newItem).block();
FaultInjectionRule requestHttpTimeoutRule = new FaultInjectionRuleBuilder("requestHttpTimeoutRule" + UUID.randomUUID())
.condition(
new FaultInjectionConditionBuilder()
.operationType(FaultInjectionOperationType.CREATE_ITEM)
.connectionType(FaultInjectionConnectionType.GATEWAY)
.build())
.result(
FaultInjectionResultBuilders.getResultBuilder(FaultInjectionServerErrorType.CONNECTION_DELAY)
.delay(Duration.ofSeconds(66))
.times(3)
.build()
)
.build();
CosmosFaultInjectionHelper.configureFaultInjectionRules(this.cosmosAsyncContainer, Arrays.asList(requestHttpTimeoutRule)).block();
try {
CosmosDiagnostics cosmosDiagnostics =
this.performDocumentOperation(cosmosAsyncContainer, OperationType.Create, newItem).block();
logger.info("writeOperationConnectionTimeout() Diagnostics " + " " + cosmosDiagnostics);
} catch (CosmosException e) {
fail("writeOperationConnectionTimeout() should pass for operationType " + OperationType.Create);
assertThat(e.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.SERVICE_UNAVAILABLE);
} finally {
requestHttpTimeoutRule.disable();
}
}
private void validateAddressRefreshRetryPolicyResponseTimeouts(CosmosDiagnostics cosmosDiagnostics) {
List<ClientSideRequestStatistics.AddressResolutionStatistics> addressResolutionStatisticsList = diagnosticsAccessor.getClientSideRequestStatistics(cosmosDiagnostics)
.stream()
.map(ClientSideRequestStatistics::getAddressResolutionStatistics)
.flatMap(m -> m.values().stream())
.sorted(Comparator.comparing(ClientSideRequestStatistics.AddressResolutionStatistics::getStartTimeUTC))
.collect(Collectors.toList());
assertThat(MILLIS.between(addressResolutionStatisticsList.get(0).getStartTimeUTC(), addressResolutionStatisticsList.get(0).getEndTimeUTC())).isLessThanOrEqualTo(600);
assertThat(SECONDS.between(addressResolutionStatisticsList.get(1).getStartTimeUTC(), addressResolutionStatisticsList.get(1).getEndTimeUTC())).isLessThanOrEqualTo(6);
assertThat(SECONDS.between(addressResolutionStatisticsList.get(2).getStartTimeUTC(), addressResolutionStatisticsList.get(2).getEndTimeUTC())).isLessThanOrEqualTo(11);
}
private Mono<CosmosDiagnostics> performDocumentOperation(
CosmosAsyncContainer cosmosAsyncContainer,
OperationType operationType,
TestItem createdItem) {
switch(operationType) {
case Query:
CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
String query = String.format("SELECT * from c where c.id = '%s'", createdItem.getId());
FeedResponse<TestItem> itemFeedResponse =
cosmosAsyncContainer.queryItems(query, queryRequestOptions, TestItem.class).byPage().blockFirst();
return Mono.just(itemFeedResponse.getCosmosDiagnostics());
case Read:
return cosmosAsyncContainer
.readItem(
createdItem.getId(),
new PartitionKey(createdItem.getId()),
TestItem.class
)
.map(itemResponse -> itemResponse.getDiagnostics());
case Replace:
return cosmosAsyncContainer
.replaceItem(
createdItem,
createdItem.getId(),
new PartitionKey(createdItem.getId()))
.map(itemResponse -> itemResponse.getDiagnostics());
case Delete:
return cosmosAsyncContainer.deleteItem(createdItem, null).map(itemResponse -> itemResponse.getDiagnostics());
case Create:
return cosmosAsyncContainer.createItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
case Upsert:
return cosmosAsyncContainer.upsertItem(TestItem.createNewItem()).map(itemResponse -> itemResponse.getDiagnostics());
case Patch:
CosmosPatchOperations patchOperations =
CosmosPatchOperations
.create()
.add("newPath", "newPath");
return cosmosAsyncContainer
.patchItem(createdItem.getId(), new PartitionKey(createdItem.getId()), patchOperations, TestItem.class)
.map(itemResponse -> itemResponse.getDiagnostics());
case ReadFeed:
List<FeedRange> feedRanges = cosmosAsyncContainer.getFeedRanges().block();
CosmosChangeFeedRequestOptions changeFeedRequestOptions =
CosmosChangeFeedRequestOptions.createForProcessingFromBeginning(feedRanges.get(0));
FeedResponse<TestItem> firstPage = cosmosAsyncContainer
.queryChangeFeed(changeFeedRequestOptions, TestItem.class)
.byPage()
.blockFirst();
return Mono.just(firstPage.getCosmosDiagnostics());
}
throw new IllegalArgumentException("The operation type is not supported");
}
} |
setting the responseTimeout should happen after the retryCount increase? | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} | this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout()); | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
}
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
}
} |
I think the isOutOfRetries should be `this.retryCount >= this.timeoutPolicy.totalRetryCount()-1;` **The flow is like the following:** timeout 1, delay 1; timeout 2, delay 2; timeout 3, delay 3. shouldRetry 0 -> timeout 2, delay 1; shouldRetry 1 -> timeout 3, delay 2; shouldRetry 2 -> out of retries | private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
} | return this.retryCount >= this.timeoutPolicy.totalRetryCount(); | private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
} |
Updated code | private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
} | return this.retryCount >= this.timeoutPolicy.totalRetryCount(); | private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
}
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
} |
Updated the code | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} | this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout()); | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
}
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
}
} |
Actually we have an issue here, not introduced in this PR. The issue is: AddressRefresh on httpTimeout is not being retried for write operations. Tracking item: https://github.com/Azure/azure-sdk-for-java/issues/37248 | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} | if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { | public Mono<ShouldRetryResult> shouldRetry(Exception e) {
if (this.isOutOfRetries()) {
logger
.warn(
"WebExceptionRetryPolicy() No more retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetry());
}
if (WebExceptionUtility.isNetworkFailure(e)) {
if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) {
int delayInSeconds = this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getDelayForNextRequestInSeconds();
retryCount++;
logger
.warn("WebExceptionRetryPolicy() Retrying on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
"shouldForceCollectionRoutingMapRefresh = {}",
this.locationEndpoint, this.request.getOperationType(), this.retryCount,
this.request.isAddressRefresh(),
this.request.shouldForceAddressRefresh(),
this.request.forceCollectionRoutingMapRefresh);
this.request.setResponseTimeout(this.timeoutPolicy.getTimeoutAndDelaysList().get(this.retryCount).getResponseTimeout());
return Mono.just(ShouldRetryResult.retryAfter(Duration.ofSeconds(delayInSeconds)));
}
}
logger
.warn(
"WebExceptionRetryPolicy() No retrying on un-retryable exceptions on endpoint {}, operationType = {}, count = {}, " +
"isAddressRefresh = {}",
this.locationEndpoint,
this.request.getOperationType(),
this.retryCount,
this.request.isAddressRefresh());
this.durationTimer.stop();
return Mono.just(ShouldRetryResult.noRetryOnNonRelatedException());
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
}
} | class WebExceptionRetryPolicy implements IRetryPolicy {
private final static Logger logger = LoggerFactory.getLogger(WebExceptionRetryPolicy.class);
private StopWatch durationTimer = new StopWatch();
private RetryContext retryContext;
private RxDocumentServiceRequest request;
private HttpTimeoutPolicy timeoutPolicy;
private boolean isReadRequest;
private int retryCount = 0;
private URI locationEndpoint;
public WebExceptionRetryPolicy() {
durationTimer.start();
}
public WebExceptionRetryPolicy(RetryContext retryContext) {
durationTimer.start();
this.retryContext = retryContext;
this.timeoutPolicy = HttpTimeoutPolicyDefault.INSTANCE;
}
@Override
@Override
public RetryContext getRetryContext() {
return this.retryContext;
}
public void onBeforeSendRequest(RxDocumentServiceRequest request) {
this.request = request;
this.isReadRequest = request.isReadOnlyRequest();
this.timeoutPolicy = HttpTimeoutPolicy.getTimeoutPolicy(request);
this.request.setResponseTimeout(timeoutPolicy.getTimeoutAndDelaysList().get(0).getResponseTimeout());
this.locationEndpoint = request.requestContext.locationEndpointToRoute;
}
private boolean isOutOfRetries() {
return this.retryCount >= this.timeoutPolicy.totalRetryCount();
}
} |
This uses a valid audience, so the request would not produce the authentication error the test asserts; use an invalid one instead: ```suggestion .audience(DataLakeAudience.getDataLakeServiceAccountAudience("badAudience")) ``` | public void audienceError() {
DataLakeDirectoryClient aadDirClient = getPathClientBuilderWithTokenCredential(
ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint(), dc.getDirectoryPath())
.fileSystemName(dataLakeFileSystemClient.getFileSystemName())
.audience(DataLakeAudience.getDataLakeServiceAccountAudience(dataLakeFileSystemClient.getAccountName()))
.buildDirectoryClient();
DataLakeStorageException e = assertThrows(DataLakeStorageException.class, aadDirClient::exists);
assertEquals(BlobErrorCode.INVALID_AUTHENTICATION_INFO.toString(), e.getErrorCode());
} | .audience(DataLakeAudience.getDataLakeServiceAccountAudience(dataLakeFileSystemClient.getAccountName())) | public void audienceError() {
DataLakeDirectoryClient aadDirClient = getPathClientBuilderWithTokenCredential(
ENVIRONMENT.getDataLakeAccount().getDataLakeEndpoint(), dc.getDirectoryPath())
.fileSystemName(dataLakeFileSystemClient.getFileSystemName())
.audience(DataLakeAudience.createDataLakeServiceAccountAudience("badAudience"))
.buildDirectoryClient();
DataLakeStorageException e = assertThrows(DataLakeStorageException.class, aadDirClient::exists);
assertEquals(BlobErrorCode.INVALID_AUTHENTICATION_INFO.toString(), e.getErrorCode());
} | class InMemoryAccessControlRecursiveChangeProgress implements Consumer<Response<AccessControlChanges>> {
List<AccessControlChangeFailure> failures = new ArrayList<>();
List<AccessControlChangeCounters> batchCounters = new ArrayList<>();
List<AccessControlChangeCounters> cumulativeCounters = new ArrayList<>();
List<AccessControlChangeFailure> firstFailures = new ArrayList<>();
boolean firstFailure = false;
@Override
public void accept(Response<AccessControlChanges> response) {
if (!firstFailure && response.getValue().getBatchFailures().size() > 0) {
firstFailures.addAll(response.getValue().getBatchFailures());
firstFailure = true;
}
failures.addAll(response.getValue().getBatchFailures());
batchCounters.add(response.getValue().getBatchCounters());
cumulativeCounters.add(response.getValue().getAggregateCounters());
}
} | class InMemoryAccessControlRecursiveChangeProgress implements Consumer<Response<AccessControlChanges>> {
List<AccessControlChangeFailure> failures = new ArrayList<>();
List<AccessControlChangeCounters> batchCounters = new ArrayList<>();
List<AccessControlChangeCounters> cumulativeCounters = new ArrayList<>();
List<AccessControlChangeFailure> firstFailures = new ArrayList<>();
boolean firstFailure = false;
@Override
public void accept(Response<AccessControlChanges> response) {
if (!firstFailure && response.getValue().getBatchFailures().size() > 0) {
firstFailures.addAll(response.getValue().getBatchFailures());
firstFailure = true;
}
failures.addAll(response.getValue().getBatchFailures());
batchCounters.add(response.getValue().getBatchCounters());
cumulativeCounters.add(response.getValue().getAggregateCounters());
}
} |
Why was this test changed? | public void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeActionsStringInputRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW,
getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
});
} | IterableStream.of(null), | public void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeActionsStringInputRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW,
getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
});
} | class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase {
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(30);
private TextAnalyticsAsyncClient client;
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
}
private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
return getTextAnalyticsClientBuilder(
buildAsyncAssertingClient(interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient),
serviceVersion,
isStaticResource)
.buildAsyncClient();
}
/**
* Verify that we can get statistics on the collection result when given a batch of documents with request options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageShowStatisticsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.assertNext(response ->
validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each {@code DetectLanguageResult} input of a batch.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.assertNext(response ->
validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch with given country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguagesCountryHintRunner((inputs, countryHint) ->
StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null))
.assertNext(actualResults ->
validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch with request options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, options))
.assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageStringInputRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, null))
.assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that a single DetectedLanguage is returned for a document to detectLanguage.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectSingleTextLanguageRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an TextAnalyticsException is thrown for a document with invalid country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageInvalidCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_COUNTRY_HINT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that a bad request exception is returned for input documents with same ids.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageDuplicateIdRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageInputEmptyIdRunner(inputs ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that with countryHint with empty string will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageEmptyCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that with countryHint with "none" will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageNoneCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.assertNext(response -> validateCategorizedEntities(response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitySingleErrorRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntityRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response))
.verifyComplete());
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntityStringInputRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options))
.assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(22, categorizedEntity.getOffset());
})).verifyComplete(), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(30, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(14, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(126, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiSingleDocumentRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitySingleErrorRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null))
.assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options))
.assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(17, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(25, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(9, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Korean NFC (precomposed syllables) prefix: PII entity expected at offset 8, length 11.
koreanNfcRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Korean NFD prefix: same expected offset (8) as the NFC test — the service appears to
// report the same span for both normalization forms here; TODO confirm that is intended.
koreanNfdRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Zalgo text (heavy combining-mark stacking) pushes the PII entity to offset 121, length 11.
zalgoTextRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(121, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Disabled pending the issue tracked in the @Disabled annotation above.
// Verifies single-document PII recognition when a domain filter is supplied via options.
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiDomainFilterRunner((document, options) ->
StepVerifier.create(client.recognizePiiEntities(document, "en", options))
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1ForDomainFilter(),
response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Disabled pending the issue tracked in the @Disabled annotation above.
// Batch of String documents with the PHI domain filter applied via RecognizePiiEntitiesOptions.
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
.assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Disabled pending the issue tracked in the @Disabled annotation above.
// Batch of TextDocumentInput with the PHI domain filter; checks the raw HTTP 200 response wrapper.
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch PII recognition with a categories filter supplied by the runner's options.
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
.assertNext(
resultCollection -> validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Two-pass test: harvest entity categories from an unfiltered run, then feed them back as a
// categories filter and verify the second run still yields the expected result collection.
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
        (inputs, options) -> {
            // Collected during the first pass; a List keeps the original behavior of
            // preserving encounter order (and any duplicates).
            List<PiiEntityCategory> categories = new ArrayList<>();
            // First pass: recognize without the harvested filter and pick out the two
            // categories of interest from every document's entities.
            StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
                .assertNext(
                    resultCollection -> {
                        resultCollection.forEach(result -> result.getEntities().forEach(piiEntity -> {
                            final PiiEntityCategory category = piiEntity.getCategory();
                            if (PiiEntityCategory.ABA_ROUTING_NUMBER == category
                                || PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == category) {
                                categories.add(category);
                            }
                        }));
                        validatePiiEntitiesResultCollection(false,
                            getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
                    })
                .expectComplete()
                // verify() blocks until completion, so 'categories' is fully populated below.
                .verify(DEFAULT_TIMEOUT);
            // Zero-length array idiom: the runtime allocates a correctly sized array; it is
            // preferred over the pre-sized toArray(new T[size]) form.
            options.setCategoriesFilter(categories.toArray(new PiiEntityCategory[0]));
            // Second pass: filtering by the harvested categories must reproduce the same result.
            StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
                .assertNext(
                    resultCollection -> validatePiiEntitiesResultCollection(false,
                        getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
                .expectComplete()
                .verify(DEFAULT_TIMEOUT);
        });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// A single document must yield the expected first linked entity.
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedEntitiesForSingleTextInputRunner(input ->
StepVerifier.create(client.recognizeLinkedEntities(input))
.assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next()))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // An empty document is rejected with a TextAnalyticsException carrying INVALID_DOCUMENT.
    emptyTextRunner(document -> {
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .expectErrorMatches(error -> error instanceof TextAnalyticsException
                && INVALID_DOCUMENT.equals(((TextAnalyticsException) error).getErrorCode()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // Duplicate document ids in a batch must be rejected with an HttpResponseException.
    duplicateIdRunner(documents -> {
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(documents, null))
            .expectErrorSatisfies(error -> assertEquals(HttpResponseException.class, error.getClass()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// An empty document id must produce HTTP 400 with error code INVALID_DOCUMENT.
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of TextDocumentInput without options; checks the raw HTTP 200 response wrapper.
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntityRunner((inputs) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false,
getExpectedBatchLinkedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Same as the plain batch test, but options request statistics ('true' in the validator).
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with null language hint and null options.
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedStringInputRunner((inputs) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
.assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with an explicit language hint supplied by the runner.
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null))
.assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with runner-supplied options; statistics expected ('true').
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options))
.assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // Exceeding the per-request document limit yields HTTP 400 with INVALID_DOCUMENT_BATCH.
    tooManyDocumentsRunner(documents -> {
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(documents, null, null))
            .expectErrorSatisfies(error -> {
                final HttpResponseException responseException = (HttpResponseException) error;
                assertEquals(400, responseException.getResponse().getStatusCode());
                final TextAnalyticsError serviceError = (TextAnalyticsError) responseException.getValue();
                assertEquals(INVALID_DOCUMENT_BATCH, serviceError.getErrorCode());
            })
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Emoji prefix on LINKED_ENTITY_INPUTS.get(1): every match is expected at offset 13, length 9.
// Offsets presumably count UTF-16 code units — TODO confirm against service documentation.
emojiRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Skin tone modifier adds code units versus the plain emoji test (offset 15 vs. 13).
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Family emoji (multi-person ZWJ sequence) prefix: matches expected at offset 22, length 9.
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(22, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Family emoji plus skin tone modifiers: matches expected at offset 30, length 9.
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(30, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// NFC-normalized diacritics prefix: matches expected at offset 14, length 9.
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(14, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// NFD form adds one combining character versus the NFC test (offset 15 vs. 14).
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Korean NFC (precomposed syllables) prefix: matches expected at offset 13, length 9.
koreanNfcRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Korean NFD prefix: same expected offset (13) as the NFC variant — the service appears to
// report the same span for both normalization forms here; TODO confirm that is intended.
koreanNfdRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Zalgo text (heavy combining-mark stacking) pushes matches to offset 126, length 9.
zalgoTextRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(126, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// A single document must produce exactly the key phrase "monde".
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesForSingleTextInputRunner(input ->
StepVerifier.create(client.extractKeyPhrases(input))
.assertNext(keyPhrasesCollection -> validateKeyPhrases(asList("monde"),
keyPhrasesCollection.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // An empty document is rejected with a TextAnalyticsException carrying INVALID_DOCUMENT.
    emptyTextRunner(document -> {
        StepVerifier.create(client.extractKeyPhrases(document))
            .expectErrorMatches(error -> error instanceof TextAnalyticsException
                && INVALID_DOCUMENT.equals(((TextAnalyticsException) error).getErrorCode()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // Duplicate document ids in a batch must be rejected with an HttpResponseException.
    duplicateIdRunner(documents -> {
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(documents, null))
            .expectErrorSatisfies(error -> assertEquals(HttpResponseException.class, error.getClass()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// An empty document id must produce HTTP 400 with error code INVALID_DOCUMENT.
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of TextDocumentInput without options; checks the raw HTTP 200 response wrapper.
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesRunner((inputs) ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Same as the plain batch test, but options request statistics ('true' in the validator).
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options))
.assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with null language hint and null options.
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesStringInputRunner((inputs) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with an explicit language hint supplied by the runner.
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with runner-supplied options; statistics expected ('true').
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // Exceeding the per-request document limit yields HTTP 400 with INVALID_DOCUMENT_BATCH.
    tooManyDocumentsRunner(documents -> {
        StepVerifier.create(client.extractKeyPhrasesBatch(documents, null, null))
            .expectErrorSatisfies(error -> {
                final HttpResponseException responseException = (HttpResponseException) error;
                assertEquals(400, responseException.getResponse().getStatusCode());
                final TextAnalyticsError serviceError = (TextAnalyticsError) responseException.getValue();
                assertEquals(INVALID_DOCUMENT_BATCH, serviceError.getErrorCode());
            })
            .verify(DEFAULT_TIMEOUT);
    });
}
/**
 * Test analyzing sentiment for a string input.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// No statistics/opinion mining requested ('false' in the validator).
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
 * Test analyzing sentiment for a string input with default language hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Passing null as the language lets the service fall back to its default hint.
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input, null))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
 * Test analyzing sentiment for a string input and verifying the result of opinion mining.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Runner supplies options with opinion mining enabled ('true' in the validator).
analyzeSentimentForTextInputWithOpinionMiningRunner((input, options) ->
StepVerifier.create(client.analyzeSentiment(input, "en", options))
.assertNext(response -> validateDocumentSentiment(true, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that an TextAnalyticsException is thrown for an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// The service rejects empty documents with error code INVALID_DOCUMENT.
emptyTextRunner(document ->
StepVerifier.create(client.analyzeSentiment(document))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
/**
 * Verifies that a batch containing duplicate document ids is rejected with an
 * {@code HttpResponseException}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    duplicateIdRunner(documents -> {
        StepVerifier.create(client.analyzeSentimentBatchWithResponse(documents, new TextAnalyticsRequestOptions()))
            .expectErrorSatisfies(error -> assertEquals(HttpResponseException.class, error.getClass()))
            .verify(DEFAULT_TIMEOUT);
    });
}
/**
 * Verifies that an invalid document exception is returned for input documents with an empty ID.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// An empty document id must produce HTTP 400 with error code INVALID_DOCUMENT.
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence options when given a batch of
 * String documents with null TextAnalyticsRequestOptions and null language code which will use the default language
 * code, 'en'.
 *
 * {@link TextAnalyticsAsyncClient
 * which TextAnalyticsRequestOptions is null and null language code which will use the default language code, 'en'.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Default options: the validator asserts no statistics and no sentence opinions.
analyzeSentimentStringInputRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false,
getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence options when given a batch of
 * String documents with null TextAnalyticsRequestOptions and given a language code.
 *
 * {@link TextAnalyticsAsyncClient
 * which TextAnalyticsRequestOptions is null and given a language code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Explicit language hint from the runner; statistics and opinions remain off.
analyzeSentimentLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, language, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes request statistics but not sentence options when given a batch of
 * String documents with AnalyzeSentimentOptions.
 *
 * {@link TextAnalyticsAsyncClient
 * which to show the request statistics only and verify the analyzed sentiment result.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Opinion mining is switched off inline; statistics stay enabled from the runner's options.
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes sentence options but not request statistics when given
 * a batch of String documents with AnalyzeSentimentOptions that disable statistics while keeping
 * opinion mining enabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        // Disable statistics; opinion mining remains enabled on the runner-supplied options.
        options.setIncludeStatistics(false);
        StepVerifier.create(client.analyzeSentimentBatch(documents, null, options))
            .assertNext(response ->
                validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT);
    });
}
/**
 * Verify that the collection result includes sentence options and request statistics when given a batch of
 * String documents with AnalyzeSentimentOptions.
 *
 * Exercises the {@link TextAnalyticsAsyncClient} batch sentiment overload with both opinion mining
 * and request statistics enabled (as provided by the runner).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence options when given a batch of
 * TextDocumentInput documents with null TextAnalyticsRequestOptions.
 *
 * Exercises the {@link TextAnalyticsAsyncClient} with-response overload where
 * TextAnalyticsRequestOptions is null; the cast disambiguates the overload.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (TextAnalyticsRequestOptions) null))
// Expects HTTP 200 with neither statistics nor opinion mining present.
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that we can get statistics on the collection result when given a batch of
 * TextDocumentInput documents with TextAnalyticsRequestOptions.
 *
 * Exercises the {@link TextAnalyticsAsyncClient} with-response overload where
 * TextAnalyticsRequestOptions includes request statistics.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentShowStatsRunner((inputs, requestOptions) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, requestOptions))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence options when given a batch of
 * TextDocumentInput documents with null AnalyzeSentimentOptions.
 *
 * Exercises the {@link TextAnalyticsAsyncClient} with-response overload where
 * AnalyzeSentimentOptions is null; the cast disambiguates the overload.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// The runner-provided options are intentionally ignored; null is passed instead.
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (AnalyzeSentimentOptions) null))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes request statistics but not sentence options when given a batch of
 * TextDocumentInput documents with AnalyzeSentimentOptions.
 *
 * Exercises the {@link TextAnalyticsAsyncClient} with-response overload where
 * AnalyzeSentimentOptions includes request statistics but not opinion mining.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
// Opinion mining is switched off on the runner-provided options before the call.
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes sentence options but not request statistics when given a batch of
 * TextDocumentInput documents with AnalyzeSentimentOptions.
 *
 * Exercises the {@link TextAnalyticsAsyncClient} with-response overload where
 * AnalyzeSentimentOptions includes opinion mining but not request statistics.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) -> {
// Statistics are turned off to isolate the opinion-mining output.
options.setIncludeStatistics(false);
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response ->
validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
/**
 * Verify that the collection result includes sentence options and request statistics when given a batch of
 * TextDocumentInput documents with AnalyzeSentimentOptions.
 *
 * Exercises the {@link TextAnalyticsAsyncClient} with-response overload where
 * AnalyzeSentimentOptions includes opinion mining and request statistics (as provided by the runner).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents.
 * Expects the service to reject the oversized batch with HTTP 400 and error code INVALID_DOCUMENT_BATCH.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
// The SDK surfaces the service failure as an HttpResponseException carrying a TextAnalyticsError.
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Verifies text offsets/lengths of sentiment results for a document containing an emoji.
// NOTE(review): expected numbers presumably reflect UTF-16 code-unit offsets — confirm against service docs.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(opinionSentiment -> {
assertEquals(7, opinionSentiment.getLength());
assertEquals(17, opinionSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Same as analyzeSentimentEmoji but the document is prefixed with an emoji carrying a skin-tone
// modifier, which shifts all expected offsets by the modifier's extra code units.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length verification for a document prefixed with a multi-person "family" emoji
// (multiple code points joined by ZWJ), which occupies more code units than a single emoji.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(34, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(26, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(16, targetSentiment.getOffset());
});
})
)
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length verification for a family emoji whose members carry skin-tone modifiers —
// the longest emoji prefix in this suite, hence the largest expected offsets.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(42, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(34, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(24, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length verification for a document containing diacritics in NFC (precomposed) form.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(26, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(18, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(8, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length verification for diacritics in NFD (decomposed) form — one code unit longer
// than the NFC variant, so all expected offsets shift by one.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length verification for Korean text in NFC (precomposed syllable) form.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length verification for Korean text in NFD (decomposed jamo) form; expected values
// match the NFC case here (same code-unit count in this input).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length verification for "Zalgo" text — characters stacked with many combining marks,
// producing much larger code-unit offsets than the plain-text cases.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(138, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(130, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(120, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Runs the healthcare-entities long-running operation over String documents without options
// and validates the single-page result collection (no statistics expected).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// dummyOptions from the runner is deliberately unused — this overload takes no options.
healthcareStringInputRunner((documents, dummyOptions) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
false,
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// Runs the healthcare-entities LRO over String documents with options; additionally verifies
// the operation display name round-trips on API versions that support it (post-v3.1).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, options) -> {
// displayName is only supported after v3.1.
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// Exercises the maximal beginAnalyzeHealthcareEntities overload (TextDocumentInput + options),
// including display-name verification on API versions that support it (post-v3.1).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroRunner((documents, options) -> {
// displayName is only supported after v3.1.
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// Verifies pagination of the healthcare-entities LRO result: the runner submits 10 documents
// (last argument) and the multi-page expected collection is validated against the paged flux.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroPaginationRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
}, 10);
}
// Verifies that starting the healthcare-entities LRO with an empty document list fails fast
// with IllegalArgumentException carrying the runner-provided error message.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyListRunner((documents, errorMessage) -> {
StepVerifier.create(client.beginAnalyzeHealthcareEntities(documents, null))
.expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException
&& errorMessage.equals(throwable.getMessage()))
.verify(DEFAULT_TIMEOUT);
});
}
// Disabled placeholder: the body only creates the client and asserts nothing.
// NOTE(review): the offset assertions appear to have been removed pending the linked issue — restore when re-enabled.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
// Verifies healthcare-entity offsets/lengths for a document containing an emoji prefix.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Same as analyzeHealthcareEntitiesEmoji, with a skin-tone modifier shifting the expected offset.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare-entity offset verification for a ZWJ-joined family emoji prefix.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(29, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare-entity offset verification for a family emoji with skin-tone modifiers — the
// longest emoji prefix in this suite, hence the largest expected offset.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(37, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare-entity offset verification for diacritics in NFC (precomposed) form.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(21, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare-entity offset verification for diacritics in NFD (decomposed) form — one code
// unit longer than NFC, shifting the expected offset by one.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare-entity offset verification for Korean text in NFC (precomposed syllable) form.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare-entity offset verification for Korean text in NFD (decomposed jamo) form;
// expected values match the NFC case for this input.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare-entity offset verification for "Zalgo" text (heavy combining-mark stacking),
// which inflates the expected code-unit offset.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(133, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
            setPollInterval(client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller());
        poller.waitForCompletion();
        // Drill into first page -> first document result -> second entity and inspect its assertion.
        final HealthcareEntityAssertion assertion = poller.getFinalResult().toStream()
            .findFirst().get()
            .stream().findFirst().get()
            .getEntities().stream().collect(Collectors.toList())
            .get(1)
            .getAssertion();
        assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
        assertNull(assertion.getAssociation());
        assertNull(assertion.getCertainty());
    });
}
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies that cancelling an in-flight healthcare-entities LRO drives the poller
// into the USER_CANCELLED terminal state.
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    cancelHealthcareLroRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        // Request cancellation before the operation completes, then poll until the
        // service acknowledges it.
        syncPoller.cancelOperation();
        // NOTE(review): this loop has no iteration cap or timeout — if the service
        // never reports USER_CANCELLED the test would spin forever; confirm acceptable.
        LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
        while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
            operationStatus = syncPoller.poll().getStatus();
        }
        syncPoller.waitForCompletion();
        Assertions.assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies beginAnalyzeActions with statistics disabled. Only one annotation pair is
// kept: the original stacked @ParameterizedTest/@MethodSource twice, which is invalid
// (@ParameterizedTest is not @Repeatable) and was clearly a leftover from an edit.
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeBatchActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks,
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Expected result is positional: slots 1 (entities), 3 (PII), 5 (key phrases)
        // and 6 (sentiment) are populated; the remaining slots are empty streams.
        validateAnalyzeBatchActionsResultList(false, false,
            Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                    TIME_NOW,
                    getRecognizePiiEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, null).getSyncPoller());
        poller.waitForCompletion();
        // Two actions of each kind were submitted, so every kind must yield two results.
        poller.getFinalResult().toStream().forEach(actionsResult -> {
            assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
            assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
        });
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeActionsWithActionNamesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, null).getSyncPoller());
        poller.waitForCompletion();
        // The caller-supplied action name must round-trip onto each action result.
        poller.getFinalResult().toStream().forEach(actionsResult -> {
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
        });
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeBatchActionsPaginationRunner((documents, tasks) -> {
        // 22 documents force the service to return the action results across two pages.
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(
                documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller());
        poller.waitForCompletion();
        validateAnalyzeBatchActionsResultList(false, false,
            getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
            poller.getFinalResult().toStream().collect(Collectors.toList()));
    }, 22);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyListRunner((documents, errorMessage) -> {
        // An empty document list must be rejected client-side with IllegalArgumentException.
        TextAnalyticsActions actions = new TextAnalyticsActions()
            .setRecognizeEntitiesActions(new RecognizeEntitiesAction());
        StepVerifier.create(client.beginAnalyzeActions(documents, actions, null))
            .expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException
                && errorMessage.equals(throwable.getMessage()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies a batch with only an entity-recognition action: the first positional slot
// of the expected result carries the action result; all other slots are empty.
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeEntitiesRecognitionRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // The eight IterableStream arguments are positional — order is significant.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                        TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies a PII-recognition action restricted to specific entity categories; only the
// PII slot (third positional argument) of the expected result is populated.
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // The eight IterableStream arguments are positional — order is significant.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies a PII-recognition action with a domain filter applied; only the PII slot
// (third positional argument) of the expected result is populated.
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithDomainFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // The eight IterableStream arguments are positional — order is significant.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies a batch with only a linked-entity recognition action; only the second
// positional slot of the expected result is populated.
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // The eight IterableStream arguments are positional — order is significant.
        validateAnalyzeBatchActionsResultList(
            false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies a batch with only a key-phrase extraction action; only the fifth
// positional slot of the expected result is populated.
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractKeyPhrasesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // The eight IterableStream arguments are positional — order is significant.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies a batch with only a sentiment-analysis action; only the sixth positional
// slot of the expected result is populated.
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeSentimentRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // The eight IterableStream arguments are positional — order is significant.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getExpectedBatchTextSentiment(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies a batch with only a healthcare-entities action; only the fourth positional
// slot of the expected result is populated, with two document results.
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // The eight IterableStream arguments are positional — order is significant.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
                    getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
                        asList(
                            getRecognizeHealthcareEntitiesResult1("0"),
                            getRecognizeHealthcareEntitiesResult2())),
                    null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller());
        poller.waitForCompletion();
        // Validate every custom-entities document result on every returned page.
        poller.getFinalResult().toStream().forEach(actionsResult ->
            actionsResult.getRecognizeCustomEntitiesResults().forEach(
                customEntitiesActionResult -> customEntitiesActionResult.getDocumentsResults().forEach(
                    documentResult -> validateCategorizedEntities(
                        documentResult.getEntities().stream().collect(Collectors.toList())))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller());
        poller.waitForCompletion();
        // Validate every single-label classification document result on every page.
        poller.getFinalResult().toStream().forEach(actionsResult ->
            actionsResult.getSingleLabelClassifyResults().forEach(
                singleLabelActionResult -> singleLabelActionResult.getDocumentsResults().forEach(
                    documentResult -> validateLabelClassificationResult(documentResult))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiCategoryClassifyAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller());
        poller.waitForCompletion();
        // Validate every multi-label classification document result on every page.
        poller.getFinalResult().toStream().forEach(actionsResult ->
            actionsResult.getMultiLabelClassifyResults().forEach(
                multiLabelActionResult -> multiLabelActionResult.getDocumentsResults().forEach(
                    documentResult -> validateLabelClassificationResult(documentResult))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        // String-input overload: project name and deployment name come from `parameters`.
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> poller =
            setPollInterval(client.beginRecognizeCustomEntities(
                documents, parameters.get(0), parameters.get(1)).getSyncPoller());
        poller.waitForCompletion();
        poller.getFinalResult().toStream().forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateCategorizedEntities(
                documentResult.getEntities().stream().collect(Collectors.toList()))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        // Options overload: verify the display name round-trips through the LRO metadata.
        final RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
            .setDisplayName("operationName");
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> poller =
            setPollInterval(client.beginRecognizeCustomEntities(
                documents, parameters.get(0), parameters.get(1), "en", options).getSyncPoller());
        PollResponse<RecognizeCustomEntitiesOperationDetail> completion = poller.waitForCompletion();
        assertEquals(options.getDisplayName(), completion.getValue().getDisplayName());
        poller.getFinalResult().toStream().forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateCategorizedEntities(
                documentResult.getEntities().stream().collect(Collectors.toList()))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationStringInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        // String-input overload: project name and deployment name come from `parameters`.
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> poller =
            setPollInterval(client.beginSingleLabelClassify(
                documents, parameters.get(0), parameters.get(1)).getSyncPoller());
        poller.waitForCompletion();
        poller.getFinalResult().toStream().forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        // Options overload: verify the display name round-trips through the LRO metadata.
        final SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> poller =
            setPollInterval(client.beginSingleLabelClassify(
                documents, parameters.get(0), parameters.get(1), "en", options).getSyncPoller());
        PollResponse<ClassifyDocumentOperationDetail> completion = poller.waitForCompletion();
        assertEquals(options.getDisplayName(), completion.getValue().getDisplayName());
        poller.getFinalResult().toStream().forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomMultiLabelRunner((documents, parameters) -> {
        // String-input overload: project name and deployment name come from `parameters`.
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> poller =
            setPollInterval(client.beginMultiLabelClassify(
                documents, parameters.get(0), parameters.get(1)).getSyncPoller());
        poller.waitForCompletion();
        poller.getFinalResult().toStream().forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomMultiLabelRunner((documents, parameters) -> {
        // Options overload: verify the display name round-trips through the LRO metadata.
        final MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> poller =
            setPollInterval(client.beginMultiLabelClassify(
                documents, parameters.get(0), parameters.get(1), "en", options).getSyncPoller());
        PollResponse<ClassifyDocumentOperationDetail> completion = poller.waitForCompletion();
        assertEquals(options.getDisplayName(), completion.getValue().getDisplayName());
        poller.getFinalResult().toStream().forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies extractive summarization with default maxSentenceCount/order (both null);
// only the seventh positional slot of the expected result is populated.
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // The eight IterableStream arguments are positional — order is significant.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
                    TIME_NOW,
                    getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
                    null))),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    }, null, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(
                documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller());
        poller.waitForCompletion();
        // With OFFSET ordering requested, each document's sentences must come back
        // sorted by ascending offset.
        poller.getFinalResult().toStream().collect(Collectors.toList()).forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                summaryActionResult -> summaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(isAscendingOrderByOffSet(
                        documentResult.getSentences().stream().collect(Collectors.toList()))))));
    }, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(
                documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller());
        poller.waitForCompletion();
        // With RANK ordering requested, each document's sentences must come back
        // sorted by descending rank score.
        poller.getFinalResult().toStream().collect(Collectors.toList()).forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                summaryActionResult -> summaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(isDescendingOrderByRankScore(
                        documentResult.getSentences().stream().collect(Collectors.toList()))))));
    }, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(
                documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller());
        poller.waitForCompletion();
        // The documents contain fewer than 20 sentences, so the service must return
        // fewer than the requested maximum.
        poller.getFinalResult().toStream().collect(Collectors.toList()).forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                summaryActionResult -> summaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(
                        documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
    }, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies that requesting maxSentenceCount = 5 yields exactly five summary sentences
// per document.
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    // Fix: JUnit's assertEquals takes (expected, actual) — the original
                    // passed the arguments reversed, producing misleading failure messages.
                    documentResult -> assertEquals(5,
                        documentResult.getSentences().stream().collect(Collectors.toList()).size()))));
    }, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies that out-of-range maxSentenceCount values (0 and 21) are rejected by the
// service with an INVALID_PARAMETER_VALUE error.
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    int[] invalidMaxSentenceCounts = {0, 21};
    for (int invalidCount: invalidMaxSentenceCounts) {
        extractiveSummaryActionRunner(
            (documents, tasks) -> {
                HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
                    SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                        client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions())
                            .getSyncPoller();
                    syncPoller = setPollInterval(syncPoller);
                    syncPoller.waitForCompletion();
                    // Fix: drop the unused local variable — getFinalResult() is kept only
                    // so the service error surfaces if waitForCompletion() did not throw.
                    syncPoller.getFinalResult();
                });
                assertEquals(
                    TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
                    ((TextAnalyticsError) exception.getValue()).getErrorCode());
            }, invalidCount, null);
    }
}
// Verifies the abstractive-summarization action with all-default parameters. The expected
// batch result carries IterableStream.of(null) placeholders for the seven action types this
// test does not exercise, and a single expected abstractive-summary result.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
TIME_NOW,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
null
)))
)),
result.toStream().collect(Collectors.toList()));
}, null);
}
// Verifies beginAbstractSummary rejects a batch containing duplicate document ids with an
// HttpResponseException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT);
});
}
// Verifies beginAbstractSummary returns HTTP 400 / InvalidDocument for a document with an
// empty id. Currently disabled (see the linked issue).
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
// Verifies beginAbstractSummary returns HTTP 400 / InvalidDocumentBatch when the batch
// exceeds the service's document limit. Currently disabled (see the linked issue).
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
// Verifies the documents-only beginAbstractSummary(String...) overload. The runner-supplied
// 'options' is intentionally not passed — presumably to exercise the minimal overload;
// TODO(review): confirm this is deliberate and not a dropped parameter.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
// Verifies the maximal beginAbstractSummary overload that accepts explicit options.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryMaxOverloadRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents, options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
// Verifies extractive-summary sentences come back in ascending offset order when
// ExtractiveSummarySentencesOrder.OFFSET is requested.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
// Verifies extractive-summary sentences come back in descending rank-score order when
// ExtractiveSummarySentencesOrder.RANK is requested.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isDescendingOrderByRankScore(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
// Verifies that when maxSentenceCount (20) exceeds the sentences available in the document,
// the service returns fewer than the requested count rather than padding.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
}, 20, null);
}
// Verifies beginExtractSummary honors a non-default maxSentenceCount: each document result
// must contain exactly 5 extracted sentences.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryRunner((documents, options) -> {
        SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
            client.beginExtractSummary(documents, "en", options).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
        result.toStream().collect(Collectors.toList()).forEach(
            documentResultCollection -> documentResultCollection.forEach(
                // JUnit convention: expected value first, actual second, so a failure
                // reports "expected: <5> but was: <n>" rather than the reverse.
                documentResult -> assertEquals(5,
                    documentResult.getSentences().stream().collect(Collectors.toList()).size())));
    }, 5, null);
}
// Verifies that out-of-range maxSentenceCount values (0 and 21) for beginExtractSummary are
// rejected by the service with an INVALID_PARAMETER_VALUE error.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryRunner(
(documents, options) -> {
// The HttpResponseException surfaces while polling; 'result' is never reached.
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
});
assertEquals(
TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
} | class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase {
// Upper bound applied to every StepVerifier.verify(...) call in this class.
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(30);
// Client under test; re-created by each test method for the given HttpClient/service version.
private TextAnalyticsAsyncClient client;
// Wraps the HTTP client so the test fails if a blocking (sync) call is made on the async path.
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
}
// Builds the async client, substituting the recorded playback client when running in
// playback mode so tests do not hit the live service.
private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
return getTextAnalyticsClientBuilder(
buildAsyncAssertingClient(interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient),
serviceVersion,
isStaticResource)
.buildAsyncClient();
}
/**
 * Verify that we can get statistics on the collection result when given a batch of documents with request options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageShowStatisticsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.assertNext(response ->
// 'true' => also validate the per-document statistics in the response.
validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Test to detect language for each {@code DetectLanguageResult} input of a batch.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.assertNext(response ->
validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Test to detect language for each string input of batch with given country hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguagesCountryHintRunner((inputs, countryHint) ->
StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null))
.assertNext(actualResults ->
validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Test to detect language for each string input of batch with request options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, options))
.assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Test to detect language for each string input of batch.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageStringInputRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, null))
.assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that a single DetectedLanguage is returned for a document to detectLanguage.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectSingleTextLanguageRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that an TextAnalyticsException is thrown for a document with invalid country hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageInvalidCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_COUNTRY_HINT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that TextAnalyticsException is thrown for an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that a bad request exception is returned for input documents with same ids.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageDuplicateIdRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that an invalid document exception is returned for input documents with an empty ID.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageInputEmptyIdRunner(inputs ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that with countryHint with empty string will not throw exception.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageEmptyCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that with countryHint with "none" will not throw exception.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageNoneCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies categorized entities are recognized for a single text input.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.assertNext(response -> validateCategorizedEntities(response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies an empty document is rejected with a TextAnalyticsException / InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
// Verifies duplicate document ids in a batch produce an HttpResponseException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
// Verifies an empty document id produces HTTP 400 / InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Verifies that a batch containing one erroneous document still returns a collection, and that
// accessing getEntities() on the failed per-document result throws the batch-error exception.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitySingleErrorRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies categorized entity recognition over a batch of documents (no statistics).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchCategorizedEntityRunner((inputs) ->
        StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
            .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response))
            // Bound the wait with DEFAULT_TIMEOUT like every other test in this class;
            // verifyComplete() would block indefinitely if the publisher never completes.
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Verifies batch entity recognition with statistics requested via options.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies batch entity recognition for plain-string inputs (no language/options).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntityStringInputRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies batch entity recognition for string inputs with an explicit language hint.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies batch entity recognition for string inputs with request options (stats validated).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options))
.assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies a batch exceeding the service document limit yields HTTP 400 / InvalidDocumentBatch.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Verifies entity offset/length are computed correctly when the document contains an emoji
// (expected offset 13 per the recorded input in CATEGORIZED_ENTITY_INPUTS).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Placeholder: disabled pending the linked issue; body intentionally only builds the client.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
// Same as recognizeEntitiesEmoji but the emoji carries a skin-tone modifier (offset shifts to 15).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Verifies entity offset/length when the document contains a multi-codepoint family emoji
// (expected offset 22 per the recorded input).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document ->
        StepVerifier.create(client.recognizeEntities(document))
            .assertNext(result -> result.forEach(categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(22, categorizedEntity.getOffset());
            }))
            // Bound the wait with DEFAULT_TIMEOUT like every other test in this class;
            // verifyComplete() would block indefinitely if the publisher never completes.
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
// Family emoji with skin-tone modifiers pushes the entity offset further (expected 30).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(30, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Diacritics in NFC (precomposed) form: expected offset 14.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(14, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Diacritics in NFD (decomposed) form add one code unit: expected offset 15.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Korean text in NFC form: expected offset 13.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Korean text in NFD form: offset matches NFC (13) per the recorded expectations.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Zalgo (heavy combining marks) text: expected offset 126.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(126, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiSingleDocumentRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// An empty document must fail with TextAnalyticsException / InvalidDocument error code.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document))
        .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
            && INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
        .verify(DEFAULT_TIMEOUT));
}
// A batch containing duplicate document IDs is rejected with an HttpResponseException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    duplicateIdRunner(inputs ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
            .expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
            .verify(DEFAULT_TIMEOUT));
}
// A batch with an empty document ID yields HTTP 400 and the InvalidDocument error code.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
            .expectErrorSatisfies(ex -> {
                // The service surfaces validation failures as a 400 with a typed error body.
                final HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
            })
            .verify(DEFAULT_TIMEOUT));
}
// A batch whose single document errored: accessing getEntities() on the errored result
// must throw TextAnalyticsException with the standard batch-error message.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitySingleErrorRunner((inputs) ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
            .assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> {
                Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
                assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Batch PII recognition (TextDocumentInput overload) against the full expected collection.
// NOTE(review): the @Disabled reason URL below is truncated mid-literal — extraction damage;
// restore the original issue link when editing this file for real.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitiesRunner((inputs) ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
            .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Same batch call but with statistics requested; validator's first flag (true) checks stats.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options))
            .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// String-list overload with an explicit language hint; no per-call options.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizePiiLanguageHintRunner((inputs, language) ->
        StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null))
            .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// String-list overload with options (stats enabled) and no language hint.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options))
            .assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Exceeding the service's per-batch document limit yields 400 / InvalidDocumentBatch.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs ->
        StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null))
            .expectErrorSatisfies(ex -> {
                final HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
            })
            .verify(DEFAULT_TIMEOUT));
}
// PII offset test: an emoji (surrogate pair) before the entity shifts its offset to 8.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(8, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: emoji plus skin-tone modifier widens the prefix, pushing offset to 10.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(10, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: a ZWJ family emoji (multiple joined code points) yields offset 17.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(17, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: family emoji with skin-tone modifiers — the longest prefix, offset 25.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(25, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: NFC-composed diacritics (single code unit each) — offset 9.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(9, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: NFD-decomposed diacritics add a combining mark — offset moves to 10.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(10, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: Korean NFC text — composed Hangul syllables, offset 8.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    koreanNfcRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(8, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: Korean NFD text — same offset (8) as NFC here, unlike the
// recognizeEntities Korean tests above; presumably the PII input normalizes equally.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    koreanNfdRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(8, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// PII offset test: zalgo text — heavy combining marks push the entity offset to 121.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    zalgoTextRunner(document ->
        StepVerifier.create(client.recognizePiiEntities(document))
            .assertNext(result -> result.forEach(piiEntity -> {
                assertEquals(11, piiEntity.getLength());
                assertEquals(121, piiEntity.getOffset());
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
    );
}
// Single-document PII recognition restricted by a domain filter (options supplied by runner).
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizePiiDomainFilterRunner((document, options) ->
        StepVerifier.create(client.recognizePiiEntities(document, "en", options))
            .assertNext(response -> validatePiiEntities(getPiiEntitiesList1ForDomainFilter(),
                response.stream().collect(Collectors.toList())))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// String-batch PII recognition filtered to the PHI (protected health information) domain.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizePiiLanguageHintRunner((inputs, language) ->
        StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language,
            new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
            .assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// TextDocumentInput-batch PII recognition with the PHI domain filter; checks HTTP 200 too.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitiesRunner((inputs) ->
        StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs,
            new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
            .assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Batch PII recognition restricted by a categories filter (options built by the runner).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
        (inputs, options) ->
            StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
                .assertNext(
                    resultCollection -> validatePiiEntitiesResultCollection(false,
                        getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
                .expectComplete()
                .verify(DEFAULT_TIMEOUT));
}
// Two-phase test: first call collects the categories actually returned for ABA routing
// number / US SSN entities, then a second call filters by exactly those categories and
// expects the same filtered result set.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
        (inputs, options) -> {
            // Mutated inside the assertNext lambda; safe because verify() blocks
            // until the first pipeline completes before categories is read below.
            List<PiiEntityCategory> categories = new ArrayList<>();
            StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
                .assertNext(
                    resultCollection -> {
                        resultCollection.forEach(result -> result.getEntities().forEach(piiEntity -> {
                            final PiiEntityCategory category = piiEntity.getCategory();
                            if (PiiEntityCategory.ABA_ROUTING_NUMBER == category
                                || PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == category) {
                                categories.add(category);
                            }
                        }));
                        validatePiiEntitiesResultCollection(false,
                            getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
                    })
                .expectComplete()
                .verify(DEFAULT_TIMEOUT);
            // Second phase: re-query using the categories harvested from the first response.
            final PiiEntityCategory[] piiEntityCategories = categories.toArray(new PiiEntityCategory[categories.size()]);
            options.setCategoriesFilter(piiEntityCategories);
            StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
                .assertNext(
                    resultCollection -> validatePiiEntitiesResultCollection(false,
                        getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
                .expectComplete()
                .verify(DEFAULT_TIMEOUT);
        });
}
// Happy path: linked-entity recognition of one document matches the first expected entity.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeLinkedEntitiesForSingleTextInputRunner(input ->
        StepVerifier.create(client.recognizeLinkedEntities(input))
            .assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next()))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Empty document: expect TextAnalyticsException with the InvalidDocument error code.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyTextRunner(input ->
        StepVerifier.create(client.recognizeLinkedEntities(input))
            .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                && INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
            .verify(DEFAULT_TIMEOUT));
}
// Duplicate document IDs in a linked-entity batch are rejected with HttpResponseException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    duplicateIdRunner(inputs ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
            .expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
            .verify(DEFAULT_TIMEOUT));
}
// Empty document ID in a linked-entity batch yields 400 / InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
            .expectErrorSatisfies(ex -> {
                final HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
            })
            .verify(DEFAULT_TIMEOUT));
}
// Batch linked-entity recognition (document overload) against the expected collection.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchLinkedEntityRunner((inputs) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
            .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false,
                getExpectedBatchLinkedEntities(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Same batch call with statistics enabled (validator's first flag set to true).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options))
            .assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// String-list overload with default language and options (both null).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeLinkedStringInputRunner((inputs) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
            .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// String-list overload with an explicit language hint supplied by the runner.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeLinkedLanguageHintRunner((inputs, language) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null))
            .assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// String-list overload with options (stats enabled) and no language hint.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options))
            .assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Over-limit batch: expect 400 / InvalidDocumentBatch from the service.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs ->
        StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
            .expectErrorSatisfies(ex -> {
                final HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
            })
            .verify(DEFAULT_TIMEOUT));
}
// Linked-entity offset test: emoji prefix shifts each match to offset 13.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(13, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: emoji + skin-tone modifier pushes matches to offset 15.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(15, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: ZWJ family emoji — matches at offset 22.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(22, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: family emoji with skin-tone modifiers — offset 30.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(30, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: NFC-composed diacritics — matches at offset 14.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(14, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: NFD decomposition adds a combining mark — offset 15.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(15, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: Korean NFC text — matches at offset 13.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    koreanNfcRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(13, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: Korean NFD text — same offset (13) as the NFC variant.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    koreanNfdRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(13, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Linked-entity offset test: zalgo text — combining marks push matches to offset 126.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    zalgoTextRunner(document ->
        StepVerifier.create(client.recognizeLinkedEntities(document))
            .assertNext(result -> result.forEach(linkedEntity -> {
                linkedEntity.getMatches().forEach(linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(126, linkedEntityMatch.getOffset());
                });
            }))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
    );
}
// Happy path: key-phrase extraction of a single (French) document returns "monde".
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractKeyPhrasesForSingleTextInputRunner(input ->
        StepVerifier.create(client.extractKeyPhrases(input))
            .assertNext(keyPhrasesCollection -> validateKeyPhrases(asList("monde"),
                keyPhrasesCollection.stream().collect(Collectors.toList())))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Empty document: expect TextAnalyticsException with the InvalidDocument error code.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyTextRunner(input ->
        StepVerifier.create(client.extractKeyPhrases(input))
            .expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
                && INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
            .verify(DEFAULT_TIMEOUT));
}
// Duplicate document IDs in a key-phrase batch are rejected with HttpResponseException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    duplicateIdRunner(inputs ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
            .expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
            .verify(DEFAULT_TIMEOUT));
}
// Empty document ID in a key-phrase batch yields 400 / InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
            .expectErrorSatisfies(ex -> {
                final HttpResponseException httpResponseException = (HttpResponseException) ex;
                assertEquals(400, httpResponseException.getResponse().getStatusCode());
                final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
                assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
            })
            .verify(DEFAULT_TIMEOUT));
}
// Batch key-phrase extraction (document overload) against the expected collection, HTTP 200.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesRunner((inputs) ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
            .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Same batch call with statistics enabled (validator's first flag set to true).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
        StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options))
            .assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// String-list overload with default language and options (both null).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractKeyPhrasesStringInputRunner((inputs) ->
        StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
            .assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
// Verifies key-phrase extraction for a batch of String documents when an explicit
// language hint is provided by the runner.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies key-phrase extraction for String documents with options that enable
// request statistics (validation flag = true), no language hint.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Verifies that exceeding the service's per-request document limit yields
// HTTP 400 with error code INVALID_DOCUMENT_BATCH.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
 * Test analyzing sentiment for a single string input using the one-argument
 * overload; validates the document sentiment without opinion mining.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
 * Test analyzing sentiment for a single string input with a null language hint,
 * exercising the default-language behavior of the two-argument overload.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input, null))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
 * Test analyzing sentiment for a string input with an explicit "en" language hint
 * and options from the runner; validates the opinion-mining result (flag = true).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForTextInputWithOpinionMiningRunner((input, options) ->
StepVerifier.create(client.analyzeSentiment(input, "en", options))
.assertNext(response -> validateDocumentSentiment(true, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that a TextAnalyticsException with error code INVALID_DOCUMENT is
 * thrown when the input document is empty.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(document ->
StepVerifier.create(client.analyzeSentiment(document))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
/**
 * Test analyzing sentiment for a batch containing duplicate document IDs;
 * the request is expected to fail with an HttpResponseException.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, new TextAnalyticsRequestOptions()))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that an invalid-document error (HTTP 400, INVALID_DOCUMENT) is
 * returned for input documents with an empty ID.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence
 * options when given a batch of String documents with default
 * TextAnalyticsRequestOptions and a null language code (default language 'en').
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentStringInputRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false,
getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence
 * options when given a batch of String documents with default
 * TextAnalyticsRequestOptions and an explicit language code from the runner.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, language, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes request statistics but not
 * sentence opinions when given a batch of String documents with
 * AnalyzeSentimentOptions where opinion mining is explicitly disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes sentence opinions but not request
 * statistics when given a batch of String documents with AnalyzeSentimentOptions
 * where statistics are explicitly disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) -> {
options.setIncludeStatistics(false);
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
/**
 * Verify that the collection result includes both sentence opinions and request
 * statistics when given a batch of String documents with AnalyzeSentimentOptions
 * that enables opinion mining and request statistics.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence
 * options when given a batch of TextDocumentInput documents with a null
 * TextAnalyticsRequestOptions (cast disambiguates the overload).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (TextAnalyticsRequestOptions) null))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that request statistics are present on the collection result when given
 * a batch of TextDocumentInput documents with TextAnalyticsRequestOptions that
 * enables statistics.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentShowStatsRunner((inputs, requestOptions) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, requestOptions))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result excludes request statistics and sentence
 * options when given a batch of TextDocumentInput documents with a null
 * AnalyzeSentimentOptions (cast disambiguates the overload).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (AnalyzeSentimentOptions) null))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes request statistics but not
 * sentence opinions for a TextDocumentInput batch when opinion mining is
 * explicitly disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verify that the collection result includes sentence opinions but not request
 * statistics for a TextDocumentInput batch when statistics are explicitly
 * disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) -> {
options.setIncludeStatistics(false);
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response ->
validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
/**
 * Verify that the collection result includes both sentence opinions and request
 * statistics for a TextDocumentInput batch when the options enable opinion
 * mining and request statistics.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
 * Verifies that an InvalidDocumentBatch error (HTTP 400) is returned when the
 * input exceeds the service's per-request document limit.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Offset/length test for sentiment over text containing an emoji.
// The expected numbers are hard-coded for SENTIMENT_OFFSET_INPUT; presumably they
// count UTF-16 code units (service default string-index type) — confirm.
// NOTE(review): the @Disabled and @MethodSource literals appear truncated by extraction.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(opinionSentiment -> {
assertEquals(7, opinionSentiment.getLength());
assertEquals(17, opinionSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over text containing an emoji with a skin-tone
// modifier; expected offsets are shifted by the modifier's extra code units.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over text containing a multi-codepoint
// "family" emoji (ZWJ sequence); expected offsets account for the sequence length.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(34, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(26, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(16, targetSentiment.getOffset());
});
})
)
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over text containing a family emoji with
// skin-tone modifiers — the longest emoji sequence variant in this test group.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(42, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(34, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(24, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over text with NFC-composed diacritics
// (precomposed characters occupy a single code unit each).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(26, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(18, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(8, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over text with NFD-decomposed diacritics
// (base character + combining mark = one extra code unit vs. the NFC test).
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over Korean text in NFC form
// (precomposed Hangul syllables).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over Korean text in NFD form; the expected
// values match the NFC test, so normalization does not shift offsets here.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test for sentiment over "zalgo" text (heavy stacks of combining
// marks), which inflates code-unit counts dramatically.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(138, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(130, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(120, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Long-running-operation test: begins healthcare entity analysis for String
// documents without options, polls to completion, and validates the single-page
// result collection (statistics flag = false).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, dummyOptions) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
false,
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// LRO test with options and "en" language: also verifies the operation display
// name round-trips, but only on API versions newer than V3_0/V3_1 which do not
// support setDisplayName.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// LRO test for the TextDocumentInput + options overload ("max overload");
// verifies display-name round-trip on supported API versions and validates the
// single-page result collection against the options' statistics flag.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// LRO pagination test: the runner submits 10 documents so the result spans
// multiple pages; validates against the expected multi-page collection.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroPaginationRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
}, 10);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // An empty document list must be rejected with IllegalArgumentException carrying
    // the runner-supplied message, before any polling starts.
    emptyListRunner((documents, errorMessage) -> {
        StepVerifier.create(client.beginAnalyzeHealthcareEntities(documents, null))
            .expectErrorMatches(error -> error instanceof IllegalArgumentException
                && errorMessage.equals(error.getMessage()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
// NOTE(review): the @MethodSource/@Disabled string literals above are unterminated —
// presumably mangled during extraction; restore the full values from source control.
// The body only builds the client and asserts nothing; the disabled scenario
// (emoji offsets counted as Unicode code points) appears unimplemented.
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // Document containing an emoji: every recognized entity must report
    // length 11 and offset 20 (service's indexing of the preceding text).
    emojiRunner(document -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
            client.beginAnalyzeHealthcareEntities(
                Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
        poller = setPollInterval(poller);
        poller.waitForCompletion();
        poller.getFinalResult().toStream().forEach(result -> result.forEach(
            entitiesResult -> entitiesResult.getEntities().forEach(entity -> {
                assertEquals(11, entity.getLength());
                assertEquals(20, entity.getOffset());
            })));
    }, HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Healthcare-entities LRO over a document containing an emoji with a skin-tone
// modifier: every returned entity must have length 11 and offset 22 (two more
// UTF-16 units than the plain-emoji case above — TODO confirm encoding assumption).
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Same LRO scenario with an emoji-family sequence (multiple code points joined by
// ZWJs — presumably; verify against emojiFamilyRunner's input): expected entity
// offset shifts to 29, length stays 11.
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(29, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Emoji family with skin-tone modifiers: longest of the emoji offset variants,
// expected entity offset 37, length 11.
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(37, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Document with NFC-composed diacritics: expected entity offset 21, length 11.
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(21, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Document with NFD-decomposed diacritics: one extra combining character vs. NFC,
// so expected entity offset is 22 (length still 11).
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Korean text in NFC form: expected entity offset 20, length 11.
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Korean text in NFD form: same expected offset/length as NFC (20/11) — the
// service evidently reports identical indices for both normalization forms here.
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Zalgo text (heavy combining-mark stacking): expected entity offset 133, length 11.
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(133, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
            client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller();
        poller = setPollInterval(poller);
        poller.waitForCompletion();
        AnalyzeHealthcareEntitiesPagedFlux pagedFlux = poller.getFinalResult();
        // Drill down: first page -> first document result -> second entity -> assertion.
        final HealthcareEntityAssertion assertion =
            pagedFlux.toStream().collect(Collectors.toList())
                .get(0).stream().collect(Collectors.toList())
                .get(0).getEntities().stream().collect(Collectors.toList())
                .get(1)
                .getAssertion();
        // Only conditionality is expected to be populated for this input.
        assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
        assertNull(assertion.getAssociation());
        assertNull(assertion.getCertainty());
    });
}
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
cancelHealthcareLroRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.cancelOperation();
LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
operationStatus = syncPoller.poll().getStatus();
}
syncPoller.waitForCompletion();
Assertions.assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchActionsRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW,
getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submitting two actions of each kind in one request: each per-document actions
// result must contain exactly two results per action category.
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
actionsResults.forEach(actionsResult -> {
assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // Custom action names set by the runner must round-trip onto the first result
    // of every action category.
    analyzeActionsWithActionNamesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            client.beginAnalyzeActions(documents, tasks, null).getSyncPoller();
        poller = setPollInterval(poller);
        poller.waitForCompletion();
        final List<AnalyzeActionsResult> actionsResults =
            poller.getFinalResult().toStream().collect(Collectors.toList());
        for (AnalyzeActionsResult actionsResult : actionsResults) {
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
        }
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch-actions LRO over 22 documents: the final paged result must match the
// expected multi-page list (page size 20, so 2 pages).
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchActionsPaginationRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux>
syncPoller = client.beginAnalyzeActions(
documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
result.toStream().collect(Collectors.toList()));
}, 22);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // Empty document list is rejected with IllegalArgumentException before the LRO starts.
    emptyListRunner((documents, errorMessage) -> {
        TextAnalyticsActions actions = new TextAnalyticsActions()
            .setRecognizeEntitiesActions(new RecognizeEntitiesAction());
        StepVerifier.create(client.beginAnalyzeActions(documents, actions, null))
            .expectErrorMatches(error -> error instanceof IllegalArgumentException
                && errorMessage.equals(error.getMessage()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch-actions LRO with only an entities-recognition action: the entities slot of
// the expected result is populated, every other action slot is an empty stream.
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeEntitiesRecognitionRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
}
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// PII recognition action with a category filter: only the PII slot (third) of the
// expected batch result is populated, using the categories-filtered collection.
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
}
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Same shape as the categories-filter test above, but the PII action uses a
// domain filter and the domain-filtered expected collection.
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzePiiEntityRecognitionWithDomainFiltersRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
}
);
}
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Linked-entity action only (disabled — see annotation): expects the second slot
// of the batch result to hold the linked-entities collection, all others empty.
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(
false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Key-phrase extraction action only: fifth slot of the expected batch result is
// populated with the key-phrases collection, all others empty.
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sentiment-analysis action only: sixth slot of the expected batch result holds
// the sentiment collection, all others empty.
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getExpectedBatchTextSentiment(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Healthcare action inside the generic batch-actions LRO: fourth slot of the
// expected batch result is a two-document healthcare collection, others empty.
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeHealthcareEntitiesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
asList(
getRecognizeHealthcareEntitiesResult1("0"),
getRecognizeHealthcareEntitiesResult2())),
null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Custom-entities action (disabled; requires a trained custom project — note the
// `true` flag on client creation): validates categorized entities per document.
public void recognizeCustomEntitiesAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getRecognizeCustomEntitiesResults().forEach(
customEntitiesActionResult -> customEntitiesActionResult.getDocumentsResults().forEach(
documentResult -> validateCategorizedEntities(
documentResult.getEntities().stream().collect(Collectors.toList())))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Single-label classification action (disabled; needs a custom project):
// validates each document's classification result.
public void singleLabelClassificationAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getSingleLabelClassifyResults().forEach(
customSingleCategoryActionResult -> customSingleCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Multi-label classification action (disabled; needs a custom project):
// validates each document's classification result.
public void multiCategoryClassifyAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getMultiLabelClassifyResults().forEach(
customMultiCategoryActionResult -> customMultiCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    // Dedicated custom-entities LRO (string overload); parameters.get(0)/get(1)
    // carry the project and deployment names supplied by the runner.
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> poller =
            client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1)).getSyncPoller();
        poller = setPollInterval(poller);
        poller.waitForCompletion();
        RecognizeCustomEntitiesPagedFlux pagedFlux = poller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection -> {
            resultCollection.forEach(documentResult -> validateCategorizedEntities(
                documentResult.getEntities().stream().collect(Collectors.toList())));
        });
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Custom-entities LRO with options: also checks the displayName set on the
// options round-trips onto the operation detail in the poll response.
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesRunner((documents, parameters) -> {
RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
.setDisplayName("operationName");
SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> syncPoller =
client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1), "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<RecognizeCustomEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
RecognizeCustomEntitiesPagedFlux pagedFlux = syncPoller.getFinalResult();
pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
resultCollection.forEach(documentResult ->
validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Single-label classification LRO (string overload, no options): validates each
// document's classification result.
public void singleLabelClassificationStringInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1))
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1))
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
TIME_NOW,
getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
null))),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
}, null, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isAscendingOrderByOffSet(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5))));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryActionRunner(
(documents, tasks) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions())
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
});
assertEquals(
TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
TIME_NOW,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
null
)))
)),
result.toStream().collect(Collectors.toList()));
}, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT);
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryMaxOverloadRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents, options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isDescendingOrderByRankScore(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5)));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryRunner(
(documents, options) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
});
assertEquals(
TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
} |
// NOTE(review): the lines below are dataset-row artifacts, not valid Java — raw '|' cell
// separators and a stray prose fragment ("Why was this test removed?") were fused into the
// code during extraction. The same analyzeActionsStringInput method appears twice (a
// before/after pair of dataset cells). Left byte-identical; needs manual reconstruction.
Why was this test removed? | public void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsStringInputRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
// Expected: entities, PII, key phrases, and sentiment actions carry results; the
// remaining action slots are empty placeholders (IterableStream.of(null)).
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
// NOTE(review): another fused dataset boundary — the "target_code" cell and the second
// (identical) copy of the method begin on the next line.
} | IterableStream.of(null), | public void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsStringInputRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
// NOTE(review): fused boundary — the sync test class's declaration begins on this line.
} | class TextAnalyticsClientTest extends TextAnalyticsClientTestBase {
private TextAnalyticsClient client;
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
private TextAnalyticsClient getTextAnalyticsClient(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
return getTextAnalyticsClientBuilder(
buildSyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient),
serviceVersion,
isStaticResource)
.buildClient();
}
/**
* Verify that we can get statistics on the collection result when given a batch of documents with options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageShowStatisticsRunner((inputs, options) -> validateDetectLanguageResultCollectionWithResponse(true,
getExpectedBatchDetectedLanguages(), 200,
client.detectLanguageBatchWithResponse(inputs, options, Context.NONE)));
}
/**
* Test Detect batch of documents languages.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageRunner((inputs) -> validateDetectLanguageResultCollectionWithResponse(false,
getExpectedBatchDetectedLanguages(), 200,
client.detectLanguageBatchWithResponse(inputs, null, Context.NONE)));
}
/**
* Test detect batch languages for a list of string input with country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguagesCountryHintRunner((inputs, countryHint) -> validateDetectLanguageResultCollection(
false, getExpectedBatchDetectedLanguages(),
client.detectLanguageBatch(inputs, countryHint, null)));
}
/**
* Test detect batch languages for a list of string input with request options
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> validateDetectLanguageResultCollection(true,
getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, options)));
}
/**
* Test detect batch languages for a list of string input.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageStringInputRunner((inputs) -> validateDetectLanguageResultCollection(
false, getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, null)));
}
/**
* Verifies that a single DetectLanguageResult is returned for a document to detect language.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectSingleTextLanguageRunner(input ->
validatePrimaryLanguage(getDetectedLanguageEnglish(), client.detectLanguage(input)));
}
/**
* Verifies that a TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.detectLanguage(input));
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
/**
* Verifies that a bad request exception is returned for input documents with same IDs.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageDuplicateIdRunner((inputs, options) -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.detectLanguageBatchWithResponse(inputs, options, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageInputEmptyIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.detectLanguageBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
/**
* Verifies that a TextAnalyticsException is thrown for a document with invalid country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageInvalidCountryHintRunner((input, countryHint) -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.detectLanguage(input, countryHint));
assertEquals(INVALID_COUNTRY_HINT, exception.getErrorCode());
});
}
/**
* Verify that with countryHint with empty string will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageEmptyCountryHintRunner((input, countryHint) ->
validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
/**
* Verify that with countryHint with "none" will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageNoneCountryHintRunner((input, countryHint) ->
validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input -> {
final List<CategorizedEntity> entities = client.recognizeEntities(input).stream().collect(Collectors.toList());
validateCategorizedEntities(entities);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.recognizeEntities(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that a batch containing duplicate document IDs is rejected with HTTP 400. */
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(inputs -> {
        final HttpResponseException response = assertThrows(HttpResponseException.class,
            () -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that a batch containing an empty document ID fails with HTTP 400 and an InvalidDocument error code. */
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that accessing entities of an errored batch result throws the expected batch-error exception message. */
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> {
        Response<RecognizeEntitiesResultCollection> response = client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE);
        response.getValue().forEach(recognizeEntitiesResult -> {
            Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
            assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
        });
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the batch (document-object) overload returns the expected categorized entities with HTTP 200. */
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchCategorizedEntityRunner((inputs) ->
        validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200,
            client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE))
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the batch overload with show-statistics options returns the expected entities and statistics. */
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
        validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200,
            client.recognizeEntitiesBatchWithResponse(inputs, options, Context.NONE))
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the plain-string batch overload (no language, no options) returns the expected categorized entities. */
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeCategorizedEntityStringInputRunner((inputs) ->
        validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
            client.recognizeEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the string batch overload honors an explicit language hint and returns the expected entities. */
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
        validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
            client.recognizeEntitiesBatch(inputs, language, null))
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the string batch overload with options (statistics enabled) returns the expected entities. */
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
        validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(),
            client.recognizeEntitiesBatch(inputs, null, options))
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that submitting more documents than the service limit fails with HTTP 400 / InvalidDocumentBatch. */
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs -> {
        // Forcing iteration of the lazy result triggers the service call and the failure.
        final HttpResponseException batchException = assertThrows(HttpResponseException.class,
            () -> client.recognizeEntitiesBatch(inputs, null, null).stream().findFirst().get());
        assertEquals(400, batchException.getResponse().getStatusCode());
        final TextAnalyticsError serviceError = (TextAnalyticsError) batchException.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, serviceError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document prefixed with a plain emoji. */
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(13, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// NOTE(review): this test only builds the client and performs no verification —
// the emoji batch-with-response assertions appear to be missing; confirm intent.
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document prefixed with an emoji plus a skin-tone modifier. */
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(15, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document prefixed with a multi-codepoint family emoji. */
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(22, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document prefixed with a family emoji carrying skin-tone modifiers. */
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(30, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document containing NFC-normalized diacritics. */
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(14, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document containing NFD-normalized diacritics. */
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(15, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document containing NFC-normalized Korean text. */
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(13, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document containing NFD-normalized Korean text. */
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(13, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies entity length/offset for a document containing zalgo (heavily combining-mark) text. */
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(document ->
        client.recognizeEntities(document).forEach(
            categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(126, categorizedEntity.getOffset());
            }),
        CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entities recognized for a single text input match the expected results. */
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizePiiSingleDocumentRunner(document -> {
        final PiiEntityCollection entities = client.recognizePiiEntities(document);
        validatePiiEntities(getPiiEntitiesList1(), entities.stream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that recognizing PII entities on empty text fails with an InvalidDocument error. */
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(document -> {
        final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class, () ->
            client.recognizePiiEntities(document).iterator().hasNext());
        assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that a PII batch containing duplicate document IDs is rejected with HTTP 400. */
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(inputs -> {
        final HttpResponseException response = assertThrows(HttpResponseException.class,
            () -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that a PII batch containing an empty document ID fails with HTTP 400 / InvalidDocument. */
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that accessing entities of an errored PII batch result throws the expected batch-error message. */
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitySingleErrorRunner((inputs) -> {
        Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE);
        response.getValue().forEach(recognizePiiEntitiesResult -> {
            Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
            assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
        });
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the PII batch (document-object) overload returns the expected entities with HTTP 200. */
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitiesRunner(inputs ->
        validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200,
            client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the PII batch overload with show-statistics options returns the expected entities and statistics. */
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
        validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200,
            client.recognizePiiEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the PII string batch overload honors an explicit language hint. */
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizePiiEntitiesLanguageHintRunner((inputs, language) ->
        validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(),
            client.recognizePiiEntitiesBatch(inputs, language, null))
    );
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the PII string batch overload with options (statistics enabled) returns the expected entities. */
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
        validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(),
            client.recognizePiiEntitiesBatch(inputs, null, options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that a PII batch exceeding the service document limit fails with HTTP 400 / InvalidDocumentBatch. */
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.recognizePiiEntitiesBatch(inputs, null, null).stream().findFirst().get());
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document prefixed with a plain emoji. */
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(8, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document prefixed with an emoji plus a skin-tone modifier. */
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(10, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document prefixed with a multi-codepoint family emoji. */
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(17, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document prefixed with a family emoji carrying skin-tone modifiers. */
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(25, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document containing NFC-normalized diacritics. */
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(9, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document containing NFD-normalized diacritics. */
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(10, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document containing NFC-normalized Korean text. */
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(8, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document containing NFD-normalized Korean text. */
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(8, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII entity length/offset for a document containing zalgo (heavily combining-mark) text. */
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(document -> {
        final PiiEntityCollection result = client.recognizePiiEntities(document);
        result.forEach(piiEntity -> {
            assertEquals(11, piiEntity.getLength());
            assertEquals(121, piiEntity.getOffset());
        });
    }, PII_ENTITY_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies PII recognition for a single document with a domain filter applied via options. */
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizePiiDomainFilterRunner((document, options) -> {
        final PiiEntityCollection entities = client.recognizePiiEntities(document, "en", options);
        validatePiiEntities(getPiiEntitiesList1ForDomainFilter(), entities.stream().collect(Collectors.toList()));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the PII string batch overload with the protected-health-information domain filter. */
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizePiiLanguageHintRunner((inputs, language) -> {
        final RecognizePiiEntitiesResultCollection response = client.recognizePiiEntitiesBatch(inputs, language,
            new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
        validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response);
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the PII document-object batch overload with the protected-health-information domain filter. */
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchPiiEntitiesRunner((inputs) -> {
        final Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs,
            new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION), Context.NONE);
        validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the PII string batch overload with a categories filter applied via options. */
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
        (inputs, options) -> {
            final RecognizePiiEntitiesResultCollection resultCollection =
                client.recognizePiiEntitiesBatch(inputs, "en", options);
            validatePiiEntitiesResultCollection(false,
                getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
        }
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/**
 * Verifies that categories harvested from a first recognition pass can be fed back as a
 * categories filter for a second pass, producing the same expected filtered results.
 */
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
        (inputs, options) -> {
            // First pass: recognize and collect the categories of interest from the results.
            List<PiiEntityCategory> collectedCategories = new ArrayList<>();
            final RecognizePiiEntitiesResultCollection firstPass = client.recognizePiiEntitiesBatch(inputs, "en", options);
            firstPass.forEach(
                result -> result.getEntities().forEach(
                    piiEntity -> {
                        final PiiEntityCategory entityCategory = piiEntity.getCategory();
                        if (PiiEntityCategory.ABA_ROUTING_NUMBER == entityCategory
                            || PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == entityCategory) {
                            collectedCategories.add(entityCategory);
                        }
                    }));
            validatePiiEntitiesResultCollection(false,
                getExpectedBatchPiiEntitiesForCategoriesFilter(), firstPass);
            // Second pass: re-run with the harvested categories as the filter; results must match.
            options.setCategoriesFilter(collectedCategories.toArray(new PiiEntityCategory[0]));
            final RecognizePiiEntitiesResultCollection secondPass = client.recognizePiiEntitiesBatch(
                inputs, "en", options);
            validatePiiEntitiesResultCollection(false,
                getExpectedBatchPiiEntitiesForCategoriesFilter(), secondPass);
        });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the first linked entity recognized for a single text input matches the expected result. */
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeLinkedEntitiesForSingleTextInputRunner(input -> {
        final List<LinkedEntity> linkedEntities = client.recognizeLinkedEntities(input)
            .stream().collect(Collectors.toList());
        validateLinkedEntity(getLinkedEntitiesList1().get(0), linkedEntities.get(0));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that recognizing linked entities on empty text fails with an InvalidDocument error. */
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(input -> {
        final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
            () -> client.recognizeLinkedEntities(input).iterator().hasNext());
        assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that a linked-entities batch containing duplicate document IDs is rejected with HTTP 400. */
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(inputs -> {
        final HttpResponseException response = assertThrows(HttpResponseException.class,
            () -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies that a linked-entities batch containing an empty document ID fails with HTTP 400 / InvalidDocument. */
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the linked-entities batch (document-object) overload returns the expected entities with HTTP 200. */
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchLinkedEntityRunner((inputs) ->
        validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200,
            client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE))
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the linked-entities batch overload with show-statistics options returns the expected results. */
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
        validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200,
            client.recognizeLinkedEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the linked-entities plain-string batch overload (no language, no options). */
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeLinkedStringInputRunner((inputs) ->
        validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the linked-entities string batch overload honors an explicit language hint. */
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeLinkedLanguageHintRunner((inputs, language) ->
        validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, language, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies the linked-entities string batch overload with options (statistics enabled). */
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
        validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies a linked-entities batch exceeding the document limit fails with HTTP 400 / InvalidDocumentBatch. */
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.recognizeLinkedEntitiesBatch(inputs, null, null).stream().findFirst().get());
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies linked-entity match length/offset for a document prefixed with a plain emoji. */
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(document ->
        client.recognizeLinkedEntities(document).forEach(
            linkedEntity -> linkedEntity.getMatches().forEach(
                linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(13, linkedEntityMatch.getOffset());
                })),
        LINKED_ENTITY_INPUTS.get(1)
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
/** Verifies linked-entity match length/offset for a document prefixed with an emoji plus a skin-tone modifier. */
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(document ->
        client.recognizeLinkedEntities(document).forEach(
            linkedEntity -> linkedEntity.getMatches().forEach(
                linkedEntityMatch -> {
                    assertEquals(9, linkedEntityMatch.getLength());
                    assertEquals(15, linkedEntityMatch.getOffset());
                })),
        LINKED_ENTITY_INPUTS.get(1)
    );
}
/**
 * Verifies linked-entity match offsets/lengths for a document containing a ZWJ emoji family
 * sequence (multiple code points inflate the UTF-16 offset).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(doc -> client.recognizeLinkedEntities(doc)
            .forEach(entity -> entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(22, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
/**
 * Verifies linked-entity match offsets/lengths for a document containing an emoji family with
 * skin-tone modifiers.
 * NOTE(review): method name has a typo ("WIth") — kept to preserve the test's identity.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(doc -> client.recognizeLinkedEntities(doc)
            .forEach(entity -> entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(30, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
/**
 * Verifies linked-entity match offsets/lengths for a document with NFC-normalized diacritics.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(doc -> client.recognizeLinkedEntities(doc)
            .forEach(entity -> entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(14, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
/**
 * Verifies linked-entity match offsets/lengths for a document with NFD-decomposed diacritics
 * (combining mark adds one UTF-16 code unit versus NFC).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(doc -> client.recognizeLinkedEntities(doc)
            .forEach(entity -> entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(15, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
/**
 * Verifies linked-entity match offsets/lengths for a document with NFC-normalized Korean text.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(doc -> client.recognizeLinkedEntities(doc)
            .forEach(entity -> entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
/**
 * Verifies linked-entity match offsets/lengths for a document with NFD-decomposed Korean text
 * (same expected offsets as NFC for this input).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(doc -> client.recognizeLinkedEntities(doc)
            .forEach(entity -> entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
/**
 * Verifies linked-entity match offsets/lengths for a document containing zalgo text
 * (heavy combining-character sequences produce a large UTF-16 offset).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(doc -> client.recognizeLinkedEntities(doc)
            .forEach(entity -> entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(126, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
/**
 * Verifies key-phrase extraction for a single text input returns the expected phrase.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesForSingleTextInputRunner(document ->
        validateKeyPhrases(asList("monde"),
            client.extractKeyPhrases(document).stream().collect(Collectors.toList())));
}
/**
 * Verifies a TextAnalyticsException with the InvalidDocument error code is thrown when
 * extracting key phrases from an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(document -> {
        // Iteration forces the lazy result to surface the service error.
        final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
            () -> client.extractKeyPhrases(document).iterator().hasNext());
        assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
    });
}
/**
 * Verifies a batch with duplicate document IDs is rejected with HTTP 400.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(documents -> {
        final HttpResponseException exception = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, exception.getResponse().getStatusCode());
    });
}
/**
 * Verifies a batch containing a document with an empty ID is rejected with HTTP 400 and the
 * InvalidDocument error code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(documents -> {
        final HttpResponseException exception = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
        assertEquals(400, exception.getResponse().getStatusCode());
        assertEquals(INVALID_DOCUMENT, ((TextAnalyticsError) exception.getValue()).getErrorCode());
    });
}
/**
 * Verifies key-phrase extraction for a batch of TextDocumentInput documents with default
 * (null) options; statistics are not expected in the result.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesRunner(documents -> {
        validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200,
            client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
    });
}
/**
 * Verifies key-phrase extraction for a batch of TextDocumentInput documents when request
 * statistics are enabled through the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesShowStatsRunner((documents, requestOptions) -> {
        validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200,
            client.extractKeyPhrasesBatchWithResponse(documents, requestOptions, Context.NONE));
    });
}
/**
 * Verifies key-phrase extraction for a batch of String documents with null language hint and
 * null options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesStringInputRunner(documents -> {
        validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, null, null));
    });
}
/**
 * Verifies key-phrase extraction for a batch of String documents when an explicit language
 * hint is supplied.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesLanguageHintRunner((documents, languageHint) -> {
        validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, languageHint, null));
    });
}
/**
 * Verifies key-phrase extraction for a batch of String documents when statistics are
 * requested through the options parameter.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchStringKeyPhrasesShowStatsRunner((documents, requestOptions) -> {
        validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, null, requestOptions));
    });
}
/**
 * Verifies the service rejects a key-phrase batch that exceeds the document-count limit with
 * HTTP 400 and the InvalidDocumentBatch error code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(documents -> {
        final HttpResponseException exception = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatch(documents, null, null).stream().findFirst().get());
        assertEquals(400, exception.getResponse().getStatusCode());
        assertEquals(INVALID_DOCUMENT_BATCH, ((TextAnalyticsError) exception.getValue()).getErrorCode());
    });
}
/**
 * Verifies sentiment analysis for a single string input against the expected document
 * sentiment (no statistics, no opinion mining).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForSingleTextInputRunner(document ->
        validateDocumentSentiment(false, getExpectedDocumentSentiment(), client.analyzeSentiment(document)));
}
/**
 * Verifies sentiment analysis for a single string input when the language hint is null,
 * exercising the default-language code path.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForSingleTextInputRunner(document ->
        // Null language hint -> service default language is used.
        validateDocumentSentiment(false, getExpectedDocumentSentiment(),
            client.analyzeSentiment(document, null)));
}
/**
 * Verifies sentiment analysis for a single string input with opinion mining enabled and an
 * explicit "en" language hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForTextInputWithOpinionMiningRunner((document, options) ->
        validateDocumentSentiment(true, getExpectedDocumentSentiment(),
            client.analyzeSentiment(document, "en", options)));
}
/**
 * Verifies a TextAnalyticsException with the InvalidDocument error code is thrown when
 * analyzing sentiment for an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(input -> {
        final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
            () -> client.analyzeSentiment(input));
        assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
    });
}
/**
 * Verifies a sentiment batch containing duplicate document IDs is rejected with HTTP 400.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(documents -> {
        final HttpResponseException exception = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatchWithResponse(documents, new TextAnalyticsRequestOptions(), Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, exception.getResponse().getStatusCode());
    });
}
/**
 * Verifies a sentiment batch containing a document with an empty ID is rejected with
 * HTTP 400 and the InvalidDocument error code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(documents -> {
        final HttpResponseException exception = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatchWithResponse(documents, null, Context.NONE));
        assertEquals(400, exception.getResponse().getStatusCode());
        assertEquals(INVALID_DOCUMENT, ((TextAnalyticsError) exception.getValue()).getErrorCode());
    });
}
/**
 * Verifies the collection result excludes request statistics and sentence opinions for a
 * batch of String documents with default (new) TextAnalyticsRequestOptions and a null
 * language hint (service default language applies).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentStringInputRunner(documents -> {
        validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, new TextAnalyticsRequestOptions()));
    });
}
/**
 * Verifies the collection result excludes request statistics and sentence opinions for a
 * batch of String documents with default (new) TextAnalyticsRequestOptions and an explicit
 * language hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentLanguageHintRunner((documents, languageHint) -> {
        validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, languageHint, new TextAnalyticsRequestOptions()));
    });
}
/**
 * Verifies the collection result includes request statistics but not opinion mining for a
 * batch of String documents when opinion mining is explicitly disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        // Statistics stay on (from the runner); only opinion mining is switched off here.
        validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options.setIncludeOpinionMining(false)));
    });
}
/**
 * Verifies the collection result includes opinion mining but not request statistics for a
 * batch of String documents when statistics are explicitly disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        // Opinion mining stays on (from the runner); only statistics are switched off here.
        options.setIncludeStatistics(false);
        validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options));
    });
}
/**
 * Verifies the collection result includes both opinion mining and request statistics for a
 * batch of String documents when both are enabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options));
    });
}
/**
 * Verifies the collection result excludes request statistics and sentence opinions for a
 * batch of TextDocumentInput documents when TextAnalyticsRequestOptions is null.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentRunner(documents -> {
        // Cast disambiguates the overload taking TextAnalyticsRequestOptions.
        validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, (TextAnalyticsRequestOptions) null, Context.NONE));
    });
}
/**
 * Verifies request statistics are present on the collection result for a batch of
 * TextDocumentInput documents when TextAnalyticsRequestOptions enables them.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentShowStatsRunner((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options, Context.NONE));
    });
}
/**
 * Verifies the collection result excludes request statistics and sentence opinions for a
 * batch of TextDocumentInput documents when AnalyzeSentimentOptions is null.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    // The runner's options argument is deliberately ignored: null options are under test.
    analyzeBatchSentimentOpinionMining((documents, unusedOptions) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, (AnalyzeSentimentOptions) null, Context.NONE));
    });
}
/**
 * Verifies the collection result includes request statistics but not opinion mining for a
 * batch of TextDocumentInput documents when opinion mining is explicitly disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options.setIncludeOpinionMining(false), Context.NONE));
    });
}
/**
 * Verifies the collection result includes opinion mining but not request statistics for a
 * batch of TextDocumentInput documents when statistics are explicitly disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        options.setIncludeStatistics(false);
        validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options, Context.NONE));
    });
}
/**
 * Verifies the collection result includes both opinion mining and request statistics for a
 * batch of TextDocumentInput documents when both are enabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options, Context.NONE));
    });
}
/**
 * Verifies the service rejects a sentiment batch that exceeds the document-count limit with
 * HTTP 400 and the InvalidDocumentBatch error code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(documents -> {
        final HttpResponseException exception = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatch(documents, null, null).stream().findFirst().get());
        assertEquals(400, exception.getResponse().getStatusCode());
        assertEquals(INVALID_DOCUMENT_BATCH, ((TextAnalyticsError) exception.getValue()).getErrorCode());
    });
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document that
 * contains an emoji, with opinion mining enabled.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(25, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(17, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(7, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document that
 * contains an emoji with a skin-tone modifier, with opinion mining enabled.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(27, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(19, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(9, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document that
 * contains a ZWJ emoji family sequence, with opinion mining enabled.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(34, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(26, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(16, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document that
 * contains an emoji family with skin-tone modifiers, with opinion mining enabled.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(42, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(34, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(24, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document with
 * NFC-normalized diacritics, with opinion mining enabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(26, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(18, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(8, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document with
 * NFD-decomposed diacritics, with opinion mining enabled.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(27, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(19, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(9, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document with
 * NFC-normalized Korean text, with opinion mining enabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(25, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(17, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(7, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
/**
 * Verifies sentence/target/assessment offsets and lengths for a sentiment document with
 * NFD-decomposed Korean text, with opinion mining enabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(doc -> {
        client.analyzeSentiment(doc, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(25, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(17, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(7, target.getOffset());
                });
            });
    }, SENTIMENT_OFFSET_INPUT);
}
// Sentiment analysis over "zalgo" text (heavy combining-character noise); the larger
// expected values (138/130/120) reflect the extra combining marks in the document.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(
        document ->
            client.analyzeSentiment(document, null,
                new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
                .getSentences()
                .forEach(sentenceSentiment -> {
                    assertEquals(138, sentenceSentiment.getLength());
                    assertEquals(0, sentenceSentiment.getOffset());
                    sentenceSentiment.getOpinions().forEach(opinion -> {
                        opinion.getAssessments().forEach(assessmentSentiment -> {
                            assertEquals(7, assessmentSentiment.getLength());
                            assertEquals(130, assessmentSentiment.getOffset());
                        });
                        final TargetSentiment targetSentiment = opinion.getTarget();
                        assertEquals(5, targetSentiment.getLength());
                        assertEquals(120, targetSentiment.getOffset());
                    });
                }),
        SENTIMENT_OFFSET_INPUT
    );
}
// Healthcare-entities LRO over plain string documents via the no-options overload;
// statistics are off, so the validator is told not to expect them.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareStringInputRunner((documents, unusedOptions) -> {
        // The runner supplies options, but this test deliberately exercises the overload
        // that takes none.
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable> poller =
            setPollInterval(client.beginAnalyzeHealthcareEntities(documents));
        poller.waitForCompletion();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            false,
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
            poller.getFinalResult().stream().collect(Collectors.toList()));
    });
}
// Healthcare-entities LRO over string documents with caller-supplied options.
// setDisplayName is only supported by API versions newer than v3.0/v3.1, so both the
// request mutation and the assertion on the operation's display name are gated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareStringInputRunner((documents, options) -> {
        boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
            && serviceVersion != TextAnalyticsServiceVersion.V3_1;
        if (isValidApiVersionForDisplayName) {
            options.setDisplayName("operationName");
        }
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
        if (isValidApiVersionForDisplayName) {
            assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        }
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            options.isIncludeStatistics(),
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
    });
}
// Exercises the maximal beginAnalyzeHealthcareEntities overload (documents, options,
// Context). Display-name support is gated by API version as in the test above.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareLroRunner((documents, options) -> {
        boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
            && serviceVersion != TextAnalyticsServiceVersion.V3_1;
        if (isValidApiVersionForDisplayName) {
            options.setDisplayName("operationName");
        }
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
        if (isValidApiVersionForDisplayName) {
            assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        }
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            options.isIncludeStatistics(),
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
    });
}
// Healthcare-entities LRO paging: the runner submits 10 documents and the final result
// is validated against the expected multi-page collection (pages starting at 0, 10 items,
// 0 skipped).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareLroPaginationRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable> poller =
            setPollInterval(client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE));
        poller.waitForCompletion();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            options.isIncludeStatistics(),
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
            poller.getFinalResult().stream().collect(Collectors.toList()));
    }, 10);
}
// An empty document list must be rejected eagerly with IllegalArgumentException carrying
// the runner-provided message.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyListRunner((documents, errorMessage) -> {
        IllegalArgumentException thrown = assertThrows(IllegalArgumentException.class,
            () -> client.beginAnalyzeHealthcareEntities(documents, null, Context.NONE).getFinalResult());
        assertEquals(errorMessage, thrown.getMessage());
    });
}
// Disabled placeholder: the body is intentionally empty while the linked issue is open.
// NOTE(review): the @Disabled URL literal below is truncated — restore the full issue
// link from the repository.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
// Healthcare entity offsets for a document containing a plain emoji: every detected
// entity is expected at length 11, offset 20 for HEALTHCARE_ENTITY_OFFSET_INPUT.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(20, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Same as the emoji test but with a skin-tone modifier; the extra surrogate pair shifts
// the expected entity offset from 20 to 22.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(22, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Family emoji (multi-codepoint ZWJ sequence) pushes the expected entity offset to 29.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(29, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Family emoji with skin-tone modifiers — the longest emoji sequence in this group;
// expected entity offset is 37.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(37, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Diacritics in NFC form (precomposed characters): expected entity offset is 21.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(21, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Diacritics in NFD form (decomposed combining marks): the extra combining character
// shifts the expected entity offset from 21 (NFC) to 22.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(22, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Korean NFC input for healthcare entities: expected entity offset 20.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(20, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Korean NFD input: expected values match the NFC case (offset 20), showing the
// normalization form does not change the reported offsets for this document.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(20, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Zalgo text (combining-mark noise) for healthcare entities: expected offset 133.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(133, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies the assertion metadata attached to a healthcare entity: the second entity of
// the first document of the first page must be HYPOTHETICAL with no association/certainty.
// The chained .get(0)/.get(1) navigation is positionally coupled to the recorded response;
// do not reorder.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        // page 0 -> document 0 -> entity 1 -> assertion
        final HealthcareEntityAssertion assertion =
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList())
                .get(0).stream().collect(Collectors.toList())
                .get(0).getEntities().stream().collect(Collectors.toList())
                .get(1)
                .getAssertion();
        assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
        assertNull(assertion.getAssociation());
        assertNull(assertion.getCertainty());
    });
}
// Cancels an in-flight healthcare LRO and waits for the USER_CANCELLED terminal status.
// NOTE(review): the while-loop polls with no back-off — each iteration does a network
// poll, but consider a bounded retry/timeout so a service regression cannot spin forever.
// Also uses the qualified Assertions.assertEquals while sibling tests use the static
// import; harmless, but inconsistent.
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    cancelHealthcareLroRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.cancelOperation();
        LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
        while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
            operationStatus = syncPoller.poll().getStatus();
        }
        syncPoller.waitForCompletion();
        Assertions.assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
    });
}
// NOTE(review): the first @ParameterizedTest/@MethodSource pair below is dangling — it is
// immediately followed by a second identical pair on the same method. @ParameterizedTest
// is not repeatable, so this will not compile as-is; it looks like a test method between
// the two pairs was removed (or lost during extraction). Confirm against the repository
// and delete the orphaned pair or restore the missing method.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs the multi-action batch with statistics explicitly disabled and compares against
// the full expected per-action result matrix (entities, PII, key phrases, sentiment).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks,
                new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Submitting two actions of each supported kind in one beginAnalyzeActions call should
// produce exactly two per-action results of each kind for every batch.
// NOTE(review): the @Disabled URL literal below is truncated — restore from repository.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
        // Pass Context.NONE instead of a raw null Context for consistency with every
        // other beginAnalyzeActions test in this class (behavior is equivalent).
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, null, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(actionsResult -> {
            // Two of each action kind were submitted, so two results of each kind return.
            assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
            assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
        });
    });
}
// Verifies that a caller-assigned action name (CUSTOM_ACTION_NAME) round-trips onto the
// first result of each action kind.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeActionsWithActionNamesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, null, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(actionsResult -> {
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
        });
    });
}
// Actions LRO paging: 22 documents force multiple result pages (0, 20, 2 layout).
// NOTE(review): the @Disabled URL literal below is truncated — restore from repository.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchActionsPaginationRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable>
            syncPoller = client.beginAnalyzeActions(
                documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
            result.stream().collect(Collectors.toList()));
    }, 22);
}
// beginAnalyzeActions with an empty document list must fail fast with
// IllegalArgumentException carrying the runner-provided message.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyListRunner((documents, errorMessage) -> {
        TextAnalyticsActions actions =
            new TextAnalyticsActions().setRecognizeEntitiesActions(new RecognizeEntitiesAction());
        IllegalArgumentException thrown = assertThrows(IllegalArgumentException.class,
            () -> client.beginAnalyzeActions(documents, actions, null, Context.NONE).getFinalResult());
        assertEquals(errorMessage, thrown.getMessage());
    });
}
// Single entity-recognition action: only the first slot of the expected action-result
// matrix is populated; all other action kinds are expected empty (IterableStream.of(null)).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeEntitiesRecognitionRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                        TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.stream().collect(Collectors.toList()));
        }
    );
}
// PII recognition action with a category filter applied; only the PII slot of the
// expected matrix is populated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.stream().collect(Collectors.toList()));
        }
    );
}
// PII recognition action with a domain filter (e.g. PHI); only the PII slot is populated.
// NOTE(review): the @Disabled URL literal below is truncated — restore from repository.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithDomainFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.stream().collect(Collectors.toList()));
        }
    );
}
// Single linked-entity recognition action (currently disabled — see @Disabled reason);
// only the linked-entities slot of the expected matrix is populated.
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Single key-phrase extraction action via the language-hinted overload ("en"); only the
// key-phrases slot of the expected matrix is populated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Single sentiment-analysis action; only the sentiment slot of the expected matrix is
// populated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getExpectedBatchTextSentiment(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Healthcare-entities action submitted through the generic actions LRO; only the
// healthcare slot of the expected matrix is populated (two expected document results).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
                    getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
                        asList(
                            getRecognizeHealthcareEntitiesResult1("0"),
                            getRecognizeHealthcareEntitiesResult2())),
                    null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Custom-entities action against a trained custom model (note the `true` flag selecting
// the custom-model client); validates each document's entities via the shared helper.
// NOTE(review): the @Disabled URL literal below is truncated — restore from repository.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesAction(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getRecognizeCustomEntitiesResults().forEach(
                customEntitiesActionResult -> customEntitiesActionResult.getDocumentsResults().forEach(
                    documentResult -> validateCategorizedEntities(
                        documentResult.getEntities().stream().collect(Collectors.toList())))));
    });
}
/**
 * Verifies the single-label classification action via {@code beginAnalyzeActions};
 * currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationAction(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        // Validate the classification of every document in every single-label result.
        actionsResults.forEach(
            actionsResult -> actionsResult.getSingleLabelClassifyResults().forEach(
                customSingleCategoryActionResult -> customSingleCategoryActionResult.getDocumentsResults().forEach(
                    documentResult -> validateLabelClassificationResult(documentResult))));
    });
}
/**
 * Verifies the multi-label classification action via {@code beginAnalyzeActions};
 * currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiCategoryClassifyAction(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        // Validate the classification of every document in every multi-label result.
        actionsResults.forEach(
            actionsResult -> actionsResult.getMultiLabelClassifyResults().forEach(
                customMultiCategoryActionResult -> customMultiCategoryActionResult.getDocumentsResults().forEach(
                    documentResult -> validateLabelClassificationResult(documentResult))));
    });
}
/**
 * Verifies {@code beginRecognizeCustomEntities} for plain string input documents;
 * currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        // parameters.get(0) = project name, parameters.get(1) = deployment name
        // — presumably; confirm against recognizeCustomEntitiesRunner in the test base.
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
            client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
        pagedIterable.forEach(resultCollection ->
            resultCollection.forEach(documentResult ->
                validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
    });
}
/**
 * Verifies {@code beginRecognizeCustomEntities} with explicit options, including that
 * the display name round-trips through the operation details; currently disabled,
 * tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
            .setDisplayName("operationName");
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
            client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1), "en", options);
        syncPoller = setPollInterval(syncPoller);
        PollResponse<RecognizeCustomEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
        // The service must echo back the display name set in the options.
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
        pagedIterable.forEach(resultCollection ->
            resultCollection.forEach(documentResult ->
                validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
    });
}
/**
 * Verifies {@code beginSingleLabelClassify} for plain string input documents;
 * currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
            client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
        pagedIterable.forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
/**
 * Verifies {@code beginSingleLabelClassify} with explicit options, including the
 * display-name round trip; currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
            client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
        syncPoller = setPollInterval(syncPoller);
        PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
        // The service must echo back the display name set in the options.
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
        pagedIterable.forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
/**
 * Verifies {@code beginMultiLabelClassify} for plain string input documents;
 * currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    classifyCustomMultiLabelRunner((documents, parameters) -> {
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
            client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
        pagedIterable.forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
/**
 * Verifies {@code beginMultiLabelClassify} with explicit options, including the
 * display-name round trip; currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, true);
    classifyCustomMultiLabelRunner((documents, parameters) -> {
        MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
            client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
        syncPoller = setPollInterval(syncPoller);
        PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
        // The service must echo back the display name set in the options.
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
        pagedIterable.forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
/**
 * Verifies the extractive-summarization action with default parameter values
 * (runner invoked with null sentence count and null order): the expected batch
 * result has only the extractive-summary slot populated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                // Default ordering: expected sentences are sorted by offset.
                IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
                    TIME_NOW,
                    getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
                    null))),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    }, null, null);
}
/**
 * Verifies that when the extractive-summarization action requests OFFSET ordering,
 * every document's extracted sentences come back in ascending offset order.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()));
        poller.waitForCompletion();
        // Walk every action result and assert each document's sentences are offset-ordered.
        for (AnalyzeActionsResult actionsResult : poller.getFinalResult()) {
            actionsResult.getExtractiveSummaryResults().forEach(summaryActionResult ->
                summaryActionResult.getDocumentsResults().forEach(documentResult ->
                    assertTrue(isAscendingOrderByOffSet(
                        documentResult.getSentences().stream().collect(Collectors.toList())))));
        }
    }, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
/**
 * Verifies that when the extractive-summarization action requests RANK ordering,
 * every document's extracted sentences come back in descending rank-score order.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(isDescendingOrderByRankScore(
                        documentResult.getSentences().stream().collect(Collectors.toList()))))));
    }, 4, ExtractiveSummarySentencesOrder.RANK);
}
/**
 * Verifies that when maxSentenceCount (20) exceeds the number of sentences in the
 * input, each summarized document returns fewer than 20 sentences.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(
                        documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
    }, 20, null);
}
/**
 * Requests extractive summarization through {@code beginAnalyzeActions} with a
 * non-default maximum sentence count (5) and verifies each document result
 * contains exactly five sentences.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    // BUGFIX: JUnit's assertEquals takes (expected, actual); the arguments
                    // were previously swapped, which yields a misleading failure message.
                    documentResult -> assertEquals(5,
                        documentResult.getSentences().stream().collect(Collectors.toList()).size()))));
    }, 5, null);
}
/**
 * Verifies that out-of-range maxSentenceCount values (0 and 21) for the extractive
 * summarization action are rejected by the service with INVALID_PARAMETER_VALUE.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    // Values just outside the valid range on either side.
    int[] invalidMaxSentenceCounts = {0, 21};
    for (int invalidCount : invalidMaxSentenceCounts) {
        extractiveSummaryActionRunner(
            (documents, tasks) -> {
                HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
                    SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
                        client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
                    syncPoller = setPollInterval(syncPoller);
                    syncPoller.waitForCompletion();
                    // The call itself is what throws; the previously unused local that
                    // captured its return value has been removed.
                    syncPoller.getFinalResult();
                });
                assertEquals(
                    INVALID_PARAMETER_VALUE,
                    ((TextAnalyticsError) exception.getValue()).getErrorCode());
            }, invalidCount, null);
    }
}
/**
 * Verifies the abstractive-summarization action with default parameter values:
 * the expected batch result has only the abstractive-summary slot populated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    abstractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
                    TIME_NOW,
                    new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
                    null
                )))
            )),
            result.stream().collect(Collectors.toList()));
    }, null);
}
/**
 * A batch containing duplicate document IDs must be rejected by
 * {@code beginAbstractSummary} with HTTP 400 (Bad Request).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(inputs -> {
        int statusCode = assertThrows(HttpResponseException.class,
            () -> client.beginAbstractSummary(inputs, null, Context.NONE))
            .getResponse().getStatusCode();
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, statusCode);
    });
}
/**
 * A document with an empty ID must be rejected by {@code beginAbstractSummary}
 * with HTTP 400 and error code INVALID_DOCUMENT; currently disabled, tracked by
 * the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.beginAbstractSummary(inputs, null, Context.NONE));
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
    });
}
/**
 * A batch exceeding the maximum document count must be rejected with HTTP 400 and
 * error code INVALID_PARAMETER_VALUE; currently disabled, tracked by the linked issue.
 */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.beginAbstractSummary(inputs, null, null).getFinalResult());
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_PARAMETER_VALUE, textAnalyticsError.getErrorCode());
    });
}
/**
 * Verifies {@code beginAbstractSummary} for plain string input documents: results
 * are validated against the expected abstractive summary collection.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    abstractiveSummaryRunner((documents, options) -> {
        SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
            client.beginAbstractSummary(documents);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            documentResult -> validateAbstractiveSummaryResultCollection(false,
                new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
                documentResult));
    }, 4);
}
/**
 * Verifies the maximal {@code beginAbstractSummary} overload (documents, options,
 * context) produces the expected abstractive summary collection.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    abstractiveSummaryMaxOverloadRunner((documents, options) -> {
        SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
            client.beginAbstractSummary(documents, options, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            documentResult -> validateAbstractiveSummaryResultCollection(false,
                new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
                documentResult));
    }, 4);
}
/**
 * Verifies {@code beginExtractSummary} with OFFSET ordering: each document's
 * extracted sentences must come back in ascending offset order.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryRunner((documents, options) -> {
        SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
            client.beginExtractSummary(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            documentResultCollection -> documentResultCollection.forEach(
                documentResult -> assertTrue(
                    isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
            ));
    }, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
/**
 * Verifies {@code beginExtractSummary} with RANK ordering: each document's
 * extracted sentences must come back in descending rank-score order.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryRunner((documents, options) -> {
        SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
            client.beginExtractSummary(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            documentResultCollection -> documentResultCollection.forEach(
                documentResult -> assertTrue(
                    isDescendingOrderByRankScore(
                        documentResult.getSentences().stream().collect(Collectors.toList())))
            ));
    }, 4, ExtractiveSummarySentencesOrder.RANK);
}
/**
 * Verifies that when maxSentenceCount (20) exceeds the number of sentences in the
 * input, {@code beginExtractSummary} returns fewer than 20 sentences per document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryRunner((documents, options) -> {
        SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
            client.beginExtractSummary(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            documentResultCollection -> documentResultCollection.forEach(
                documentResult -> assertTrue(
                    documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
    }, 20, null);
}
/**
 * Requests extractive summarization through {@code beginExtractSummary} with a
 * non-default maximum sentence count (5) and verifies each document result
 * contains exactly five sentences.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryRunner((documents, options) -> {
        SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
            client.beginExtractSummary(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            documentResultCollection -> documentResultCollection.forEach(
                // BUGFIX: JUnit's assertEquals takes (expected, actual); the arguments
                // were previously swapped, which yields a misleading failure message.
                documentResult -> assertEquals(5,
                    documentResult.getSentences().stream().collect(Collectors.toList()).size())));
    }, 5, null);
}
/**
 * Verifies that out-of-range maxSentenceCount values (0 and 21) passed to
 * {@code beginExtractSummary} are rejected with INVALID_PARAMETER_VALUE.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    // Values just outside the valid range on either side.
    int[] invalidMaxSentenceCounts = {0, 21};
    for (int invalidCount : invalidMaxSentenceCounts) {
        extractiveSummaryRunner(
            (documents, options) -> {
                HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
                    SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
                        client.beginExtractSummary(documents, "en", options);
                    syncPoller = setPollInterval(syncPoller);
                    syncPoller.waitForCompletion();
                    // The call itself is what throws; the previously unused local that
                    // captured its return value has been removed.
                    syncPoller.getFinalResult();
                });
                assertEquals(
                    INVALID_PARAMETER_VALUE,
                    ((TextAnalyticsError) exception.getValue()).getErrorCode());
            }, invalidCount, null);
    }
}
}

class TextAnalyticsClientTest extends TextAnalyticsClientTestBase {
// Client under test; re-created per test method with the requested service version.
private TextAnalyticsClient client;

/** Wraps the given HTTP client so every request is asserted to use the synchronous pipeline. */
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
    return new AssertingHttpClientBuilder(httpClient)
        .assertSync()
        .build();
}
/**
 * Builds a synchronous TextAnalyticsClient for the given service version, substituting
 * the recorded playback client when the interceptor manager is in playback mode.
 *
 * @param isStaticResource whether the test targets the static (pre-trained custom model) resource.
 */
private TextAnalyticsClient getTextAnalyticsClient(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
    return getTextAnalyticsClientBuilder(
        buildSyncAssertingClient(
            interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient),
        serviceVersion,
        isStaticResource)
        .buildClient();
}
/**
 * Verify that we can get statistics on the collection result when given a batch of documents with options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    // Expects HTTP 200 and statistics included (first validation flag = true).
    detectLanguageShowStatisticsRunner((inputs, options) -> validateDetectLanguageResultCollectionWithResponse(true,
        getExpectedBatchDetectedLanguages(), 200,
        client.detectLanguageBatchWithResponse(inputs, options, Context.NONE)));
}
/**
 * Test Detect batch of documents languages.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    // No options supplied, so no statistics are expected (first validation flag = false).
    detectLanguageRunner((inputs) -> validateDetectLanguageResultCollectionWithResponse(false,
        getExpectedBatchDetectedLanguages(), 200,
        client.detectLanguageBatchWithResponse(inputs, null, Context.NONE)));
}
/**
 * Test detect batch languages for a list of string input with country hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    detectLanguagesCountryHintRunner((inputs, countryHint) -> validateDetectLanguageResultCollection(
        false, getExpectedBatchDetectedLanguages(),
        client.detectLanguageBatch(inputs, countryHint, null)));
}
/**
 * Test detect batch languages for a list of string input with request options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    // Options are set, so statistics are expected (first validation flag = true).
    detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> validateDetectLanguageResultCollection(true,
        getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, options)));
}
/**
 * Test detect batch languages for a list of string input.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    detectLanguageStringInputRunner((inputs) -> validateDetectLanguageResultCollection(
        false, getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, null)));
}
/**
 * Verifies that a single DetectLanguageResult is returned for a document to detect language.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    detectSingleTextLanguageRunner(input -> {
        // The single-document overload should identify the text as English.
        validatePrimaryLanguage(getDetectedLanguageEnglish(), client.detectLanguage(input));
    });
}
/**
 * Verifies that a TextAnalyticsException is thrown for an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(input -> {
        TextAnalyticsException thrown =
            assertThrows(TextAnalyticsException.class, () -> client.detectLanguage(input));
        // Empty input text maps to the INVALID_DOCUMENT error code.
        assertEquals(INVALID_DOCUMENT, thrown.getErrorCode());
    });
}
/**
 * Verifies that a bad request exception is returned for input documents with same IDs.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    detectLanguageDuplicateIdRunner((inputs, options) -> {
        final HttpResponseException response = assertThrows(HttpResponseException.class,
            () -> client.detectLanguageBatchWithResponse(inputs, options, Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
    });
}
/**
 * Verifies that an invalid document exception is returned for input documents with an empty ID.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    detectLanguageInputEmptyIdRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.detectLanguageBatchWithResponse(inputs, null, Context.NONE));
        assertEquals(400, httpResponseException.getResponse().getStatusCode());
        // The deserialized service error carries the specific INVALID_DOCUMENT code.
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
    });
}
/**
 * Verifies that a TextAnalyticsException is thrown for a document with invalid country hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    detectLanguageInvalidCountryHintRunner((input, countryHint) -> {
        final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
            () -> client.detectLanguage(input, countryHint));
        assertEquals(INVALID_COUNTRY_HINT, exception.getErrorCode());
    });
}
/**
 * Verify that with countryHint with empty string will not throw exception.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    // The runner supplies Spanish text; an empty hint must still yield a valid detection.
    detectLanguageEmptyCountryHintRunner((input, countryHint) ->
        validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
/**
* Verify that with countryHint with "none" will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageNoneCountryHintRunner((input, countryHint) ->
validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input -> {
final List<CategorizedEntity> entities = client.recognizeEntities(input).stream().collect(Collectors.toList());
validateCategorizedEntities(entities);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.recognizeEntities(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> {
Response<RecognizeEntitiesResultCollection> response = client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE);
response.getValue().forEach(recognizeEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntityRunner((inputs) ->
validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200,
client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200,
client.recognizeEntitiesBatchWithResponse(inputs, options, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntityStringInputRunner((inputs) ->
validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, language, null))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, null, options))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(22, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(30, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(14, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(126, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiSingleDocumentRunner(document -> {
final PiiEntityCollection entities = client.recognizePiiEntities(document);
validatePiiEntities(getPiiEntitiesList1(), entities.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(document -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class, () ->
client.recognizePiiEntities(document).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitySingleErrorRunner((inputs) -> {
Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE);
response.getValue().forEach(recognizePiiEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
});
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner(inputs ->
validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200,
client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200,
client.recognizePiiEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiEntitiesLanguageHintRunner((inputs, language) ->
validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(),
client.recognizePiiEntitiesBatch(inputs, language, null))
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(),
client.recognizePiiEntitiesBatch(inputs, null, options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(17, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(25, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(9, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(121, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiDomainFilterRunner((document, options) -> {
final PiiEntityCollection entities = client.recognizePiiEntities(document, "en", options);
validatePiiEntities(getPiiEntitiesList1ForDomainFilter(), entities.stream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) -> {
final RecognizePiiEntitiesResultCollection response = client.recognizePiiEntitiesBatch(inputs, language,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response);
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) -> {
final Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION), Context.NONE);
validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
final RecognizePiiEntitiesResultCollection resultCollection =
client.recognizePiiEntitiesBatch(inputs, "en", options);
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
}
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
List<PiiEntityCategory> categories = new ArrayList<>();
final RecognizePiiEntitiesResultCollection resultCollection = client.recognizePiiEntitiesBatch(inputs, "en", options);
resultCollection.forEach(
result -> result.getEntities().forEach(
piiEntity -> {
final PiiEntityCategory category = piiEntity.getCategory();
if (PiiEntityCategory.ABA_ROUTING_NUMBER == category
|| PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == category) {
categories.add(category);
}
}));
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
final PiiEntityCategory[] piiEntityCategories = categories.toArray(
new PiiEntityCategory[categories.size()]);
options.setCategoriesFilter(piiEntityCategories);
final RecognizePiiEntitiesResultCollection resultCollection2 = client.recognizePiiEntitiesBatch(
inputs, "en", options);
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection2);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedEntitiesForSingleTextInputRunner(input -> {
final List<LinkedEntity> linkedEntities = client.recognizeLinkedEntities(input)
.stream().collect(Collectors.toList());
validateLinkedEntity(getLinkedEntitiesList1().get(0), linkedEntities.get(0));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.recognizeLinkedEntities(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntityRunner((inputs) ->
validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200,
client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200,
client.recognizeLinkedEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedStringInputRunner((inputs) ->
validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedLanguageHintRunner((inputs, language) ->
validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, language, null)));
}
// Verifies batch linked-entity recognition for String documents with request options that
// enable statistics; validates statistics are present.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchStringLinkedEntitiesShowStatsRunner((documents, options) -> {
        validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(),
            client.recognizeLinkedEntitiesBatch(documents, null, options));
    });
}
// Verifies that submitting more documents than the service allows yields an HTTP 400 with the
// InvalidDocumentBatch error code.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(documents -> {
        final HttpResponseException ex = assertThrows(HttpResponseException.class,
            () -> client.recognizeLinkedEntitiesBatch(documents, null, null).stream().findFirst().get());
        assertEquals(400, ex.getResponse().getStatusCode());
        final TextAnalyticsError error = (TextAnalyticsError) ex.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, error.getErrorCode());
    });
}
// Verifies UTF-16 offset/length of linked-entity matches when the document contains an emoji.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches for an emoji with a skin-tone modifier.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(15, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches for a multi-codepoint family emoji.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(22, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches for a family emoji with skin-tone modifiers.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(30, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches with NFC-normalized diacritics in the text.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(14, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches with NFD-normalized diacritics in the text.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(15, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches with NFC-normalized Korean text.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches with NFD-normalized Korean text.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies UTF-16 offset/length of linked-entity matches when the document contains zalgo
// (combining-mark-heavy) text.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(input ->
        client.recognizeLinkedEntities(input).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(126, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
// Verifies key-phrase extraction for a single text input; expects the phrase "monde".
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesForSingleTextInputRunner(document -> {
        final KeyPhrasesCollection keyPhrases = client.extractKeyPhrases(document);
        validateKeyPhrases(asList("monde"), keyPhrases.stream().collect(Collectors.toList()));
    });
}
// Verifies a TextAnalyticsException with the InvalidDocument error code for an empty document.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(document -> {
        final TextAnalyticsException ex = assertThrows(TextAnalyticsException.class,
            () -> client.extractKeyPhrases(document).iterator().hasNext());
        assertEquals(INVALID_DOCUMENT, ex.getErrorCode());
    });
}
// Verifies an HTTP 400 response when the batch contains documents with duplicate IDs.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(documents -> {
        final HttpResponseException ex = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, ex.getResponse().getStatusCode());
    });
}
// Verifies an HTTP 400 with the InvalidDocument error code when a document has an empty ID.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(documents -> {
        final HttpResponseException ex = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
        assertEquals(400, ex.getResponse().getStatusCode());
        final TextAnalyticsError error = (TextAnalyticsError) ex.getValue();
        assertEquals(INVALID_DOCUMENT, error.getErrorCode());
    });
}
// Verifies batch key-phrase extraction for TextDocumentInput documents: expects HTTP 200 and
// no request statistics.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesRunner(documents -> {
        validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200,
            client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
    });
}
// Verifies batch key-phrase extraction with options requesting statistics: expects HTTP 200
// and statistics present.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesShowStatsRunner((documents, options) -> {
        validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200,
            client.extractKeyPhrasesBatchWithResponse(documents, options, Context.NONE));
    });
}
// Verifies batch key-phrase extraction for plain String documents with default language and
// no options; no statistics expected.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesStringInputRunner(documents -> {
        validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, null, null));
    });
}
// Verifies batch key-phrase extraction for String documents when an explicit language hint is
// supplied by the runner.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesLanguageHintRunner((documents, languageHint) -> {
        validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, languageHint, null));
    });
}
// Verifies batch key-phrase extraction for String documents with options that enable
// statistics; validates statistics are present.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchStringKeyPhrasesShowStatsRunner((documents, options) -> {
        validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, null, options));
    });
}
// Verifies that submitting more documents than the service allows yields an HTTP 400 with the
// InvalidDocumentBatch error code.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(documents -> {
        final HttpResponseException ex = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatch(documents, null, null).stream().findFirst().get());
        assertEquals(400, ex.getResponse().getStatusCode());
        final TextAnalyticsError error = (TextAnalyticsError) ex.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, error.getErrorCode());
    });
}
/**
 * Verifies the document sentiment returned when analyzing a single string input.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForSingleTextInputRunner(document ->
        validateDocumentSentiment(false, getExpectedDocumentSentiment(), client.analyzeSentiment(document)));
}
/**
 * Verifies the document sentiment for a single string input when the language hint is null,
 * which falls back to the default language.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForSingleTextInputRunner(document -> {
        validateDocumentSentiment(false, getExpectedDocumentSentiment(),
            client.analyzeSentiment(document, null));
    });
}
/**
 * Verifies the document sentiment, including opinion mining results, for a single string input
 * analyzed with the "en" language hint.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForTextInputWithOpinionMiningRunner((document, options) ->
        validateDocumentSentiment(true, getExpectedDocumentSentiment(),
            client.analyzeSentiment(document, "en", options)));
}
/**
 * Verifies that a {@link TextAnalyticsException} with the InvalidDocument error code is thrown
 * for an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(input -> {
        final TextAnalyticsException ex = assertThrows(TextAnalyticsException.class,
            () -> client.analyzeSentiment(input));
        assertEquals(INVALID_DOCUMENT, ex.getErrorCode());
    });
}
/**
 * Verifies an HTTP 400 response when the sentiment batch contains documents with duplicate IDs.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(documents -> {
        final HttpResponseException ex = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatchWithResponse(documents, new TextAnalyticsRequestOptions(), Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, ex.getResponse().getStatusCode());
    });
}
/**
 * Verifies an HTTP 400 with the InvalidDocument error code when a document in the sentiment
 * batch has an empty ID.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(documents -> {
        final HttpResponseException ex = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatchWithResponse(documents, null, Context.NONE));
        assertEquals(400, ex.getResponse().getStatusCode());
        final TextAnalyticsError error = (TextAnalyticsError) ex.getValue();
        assertEquals(INVALID_DOCUMENT, error.getErrorCode());
    });
}
/**
 * Verifies that the collection result excludes request statistics and sentence opinions when a
 * batch of String documents is analyzed with default request options and a null language code
 * (which falls back to the default language, 'en').
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentStringInputRunner(documents -> {
        validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, new TextAnalyticsRequestOptions()));
    });
}
/**
 * Verifies that the collection result excludes request statistics and sentence opinions when a
 * batch of String documents is analyzed with default request options and an explicit language code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentLanguageHintRunner((documents, languageHint) -> {
        validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, languageHint, new TextAnalyticsRequestOptions()));
    });
}
/**
 * Verifies that the collection result includes request statistics but no sentence opinions when
 * a batch of String documents is analyzed with opinion mining explicitly disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options.setIncludeOpinionMining(false)));
    });
}
/**
 * Verifies that the collection result includes sentence opinions but no request statistics when
 * a batch of String documents is analyzed with opinion mining enabled and statistics disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        // Turn statistics off; opinion mining stays enabled from the runner-supplied options.
        options.setIncludeStatistics(false);
        validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options));
    });
}
/**
 * Verifies that the collection result includes both sentence opinions and request statistics
 * when a batch of String documents is analyzed with both enabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options));
    });
}
/**
 * Verifies that the collection result excludes request statistics and sentence opinions when a
 * batch of TextDocumentInput documents is analyzed with a null TextAnalyticsRequestOptions.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentRunner(documents -> {
        validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, (TextAnalyticsRequestOptions) null, Context.NONE));
    });
}
/**
 * Verifies that request statistics are returned when a batch of TextDocumentInput documents is
 * analyzed with TextAnalyticsRequestOptions that request statistics.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentShowStatsRunner((documents, requestOptions) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, requestOptions, Context.NONE));
    });
}
/**
 * Verifies that the collection result excludes request statistics and sentence opinions when a
 * batch of TextDocumentInput documents is analyzed with a null AnalyzeSentimentOptions.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, (AnalyzeSentimentOptions) null, Context.NONE));
    });
}
/**
 * Verifies that the collection result includes request statistics but no sentence opinions when
 * a batch of TextDocumentInput documents is analyzed with opinion mining disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options.setIncludeOpinionMining(false), Context.NONE));
    });
}
/**
 * Verifies that the collection result includes sentence opinions but no request statistics when
 * a batch of TextDocumentInput documents is analyzed with opinion mining enabled and statistics
 * disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        // Turn statistics off; opinion mining stays enabled from the runner-supplied options.
        options.setIncludeStatistics(false);
        validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options, Context.NONE));
    });
}
/**
 * Verifies that the collection result includes both sentence opinions and request statistics
 * when a batch of TextDocumentInput documents is analyzed with both enabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options, Context.NONE));
    });
}
/**
 * Verifies an HTTP 400 with the InvalidDocumentBatch error code when more documents are
 * submitted than the service allows.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(documents -> {
        final HttpResponseException ex = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatch(documents, null, null).stream().findFirst().get());
        assertEquals(400, ex.getResponse().getStatusCode());
        final TextAnalyticsError error = (TextAnalyticsError) ex.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, error.getErrorCode());
    });
}
// Verifies UTF-16 offsets/lengths of sentence, assessment, and target sentiment when the
// document contains an emoji; opinion mining is enabled.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(input ->
        client.analyzeSentiment(input, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(25, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(17, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(7, target.getOffset());
                });
            }),
        SENTIMENT_OFFSET_INPUT);
}
// Verifies UTF-16 offsets/lengths of sentence, assessment, and target sentiment for an emoji
// with a skin-tone modifier; opinion mining is enabled.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(input ->
        client.analyzeSentiment(input, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(27, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(19, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(9, target.getOffset());
                });
            }),
        SENTIMENT_OFFSET_INPUT);
}
// Verifies UTF-16 offsets/lengths of sentence, assessment, and target sentiment for a
// multi-codepoint family emoji; opinion mining is enabled.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(input ->
        client.analyzeSentiment(input, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(34, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(26, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(16, target.getOffset());
                });
            }),
        SENTIMENT_OFFSET_INPUT);
}
// Verifies UTF-16 offsets/lengths of sentence, assessment, and target sentiment for a family
// emoji with skin-tone modifiers; opinion mining is enabled.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(input ->
        client.analyzeSentiment(input, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(42, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(34, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(24, target.getOffset());
                });
            }),
        SENTIMENT_OFFSET_INPUT);
}
// Verifies UTF-16 offsets/lengths of sentence, assessment, and target sentiment with
// NFC-normalized diacritics in the text; opinion mining is enabled.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(input ->
        client.analyzeSentiment(input, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(26, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(18, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(8, target.getOffset());
                });
            }),
        SENTIMENT_OFFSET_INPUT);
}
// Verifies UTF-16 offsets/lengths of sentence, assessment, and target sentiment with
// NFD-normalized diacritics in the text; opinion mining is enabled.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(input ->
        client.analyzeSentiment(input, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
            .getSentences()
            .forEach(sentence -> {
                assertEquals(27, sentence.getLength());
                assertEquals(0, sentence.getOffset());
                sentence.getOpinions().forEach(opinion -> {
                    opinion.getAssessments().forEach(assessment -> {
                        assertEquals(7, assessment.getLength());
                        assertEquals(19, assessment.getOffset());
                    });
                    final TargetSentiment target = opinion.getTarget();
                    assertEquals(5, target.getLength());
                    assertEquals(9, target.getOffset());
                });
            }),
        SENTIMENT_OFFSET_INPUT);
}
// Verifies sentiment offsets/lengths for the Korean NFC-form variant of
// SENTIMENT_OFFSET_INPUT with opinion mining enabled.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
// Verifies sentiment offsets/lengths for the Korean NFD-form variant; expected values
// match the NFC case (25/17/7), i.e. normalization form does not change the counts here.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document ->
client.analyzeSentiment(document, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
// Verifies sentiment offsets/lengths for the zalgo-text variant, whose many combining
// characters inflate the sentence length (138) and shift opinion offsets (130/120).
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(138, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(130, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(120, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
// Runs the healthcare-entities LRO with string input and no options, then validates the
// final result pages against the single-page expected collection (statistics disabled).
// The runner-supplied options are intentionally unused (hence "dummyOptions").
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, dummyOptions) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
false,
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
});
}
// Runs the healthcare-entities LRO with string input plus options, additionally checking
// that the display name round-trips on API versions newer than V3.0/V3.1.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, options) -> {
// Display-name support only exists on service versions after V3.0 and V3.1.
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
});
}
// Exercises the maximal beginAnalyzeHealthcareEntities overload (documents, options,
// Context), including the display-name round-trip on supported API versions.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareLroRunner((documents, options) -> {
// Display-name support only exists on service versions after V3.0 and V3.1.
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
});
}
// Submits 10 documents so the healthcare-entities LRO result spans multiple pages, then
// validates the paged final result against the multi-page expected collection.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareLroPaginationRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
}, 10);
}
// Verifies that beginning the healthcare-entities LRO with an empty document list throws
// IllegalArgumentException with the runner-provided error message.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyListRunner((documents, errorMessage) -> {
final IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
() -> client.beginAnalyzeHealthcareEntities(documents, null, Context.NONE).getFinalResult());
assertEquals(errorMessage, exception.getMessage());
});
}
// Disabled placeholder: body only creates the client and performs no assertions.
// NOTE(review): the @Disabled reason URL and @MethodSource string literal are truncated
// in this copy of the file — restore from the canonical source.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
// Verifies healthcare-entity length/offset (11/20) for the emoji variant of
// HEALTHCARE_ENTITY_OFFSET_INPUT, run through the full LRO.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/22) when the input emoji carries a skin
// tone modifier, which shifts the entity offset relative to the plain-emoji test.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/29) for the multi-code-point family-emoji
// variant of HEALTHCARE_ENTITY_OFFSET_INPUT.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(29, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/37) for the family emoji with skin tone
// modifiers — the longest multi-code-point prefix among these offset tests.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(37, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/21) for the diacritics NFC-form variant
// of HEALTHCARE_ENTITY_OFFSET_INPUT.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(21, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/22) for the diacritics NFD-form variant;
// decomposition shifts the offset by one versus the NFC test above.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/20) for the Korean NFC-form variant of
// HEALTHCARE_ENTITY_OFFSET_INPUT.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/20) for the Korean NFD-form variant;
// expected values match the NFC case.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies healthcare-entity length/offset (11/133) for the zalgo-text variant, whose
// combining characters push the entity offset far right.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(133, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies the assertion attached to the second entity of the first document: it must be
// HYPOTHETICAL conditionality with no association and no certainty.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
// Drill down: first page -> first document result -> second entity -> its assertion.
final HealthcareEntityAssertion assertion =
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList())
.get(0).stream().collect(Collectors.toList())
.get(0).getEntities().stream().collect(Collectors.toList())
.get(1)
.getAssertion();
assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
assertNull(assertion.getAssociation());
assertNull(assertion.getCertainty());
});
}
// Verifies that an in-flight healthcare-entities LRO can be cancelled: after calling
// cancelOperation(), polling must eventually observe USER_CANCELLED as the terminal
// status. Currently disabled to keep the test run green.
// NOTE(review): the @MethodSource string literal is truncated in this copy of the file
// (unterminated quote) — restore from the canonical source before compiling.
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
cancelHealthcareLroRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.cancelOperation();
// Poll until the service reflects the cancellation; setPollInterval keeps the loop
// from hammering the service between polls.
LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
operationStatus = syncPoller.poll().getStatus();
}
syncPoller.waitForCompletion();
// Use the statically imported assertEquals for consistency with the rest of this file
// (the original called Assertions.assertEquals here only).
assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
});
}
// NOTE(review): the annotation pair on the next two lines is duplicated (it appears
// again immediately below) — this copy of the file looks like an extraction artifact
// where a sibling test method between the two pairs was lost; verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs beginAnalyzeActions with statistics disabled and validates the per-action result
// streams: entities, PII, key phrases, and sentiment populated; the rest empty.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchActionsRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
// Verifies that submitting two actions of the same kind yields two results per kind in
// each AnalyzeActionsResult (entities, PII, linked entities, sentiment, key phrases).
// NOTE(review): the @Disabled reason URL and @MethodSource string literal are truncated
// in this copy of the file — restore from the canonical source before compiling.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
// Pass Context.NONE rather than a null Context for consistency with the sibling
// tests in this file (which all use Context.NONE on this overload).
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
// Each action kind was requested twice, so each result stream must carry two entries.
actionsResults.forEach(actionsResult -> {
assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
});
});
}
// Verifies that a caller-supplied action name (CUSTOM_ACTION_NAME) is echoed back on the
// first result of each action kind.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsWithActionNamesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(actionsResult -> {
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
});
});
}
// Submits 22 documents so the analyze-actions result spans multiple pages, then
// validates against the expected multi-page list (page size 20, 2 remaining).
// NOTE(review): the @Disabled reason URL and @MethodSource string literal are truncated
// in this copy of the file — restore from the canonical source.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchActionsPaginationRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable>
syncPoller = client.beginAnalyzeActions(
documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
result.stream().collect(Collectors.toList()));
}, 22);
}
// Verifies that beginAnalyzeActions with an empty document list throws
// IllegalArgumentException with the runner-provided error message.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyListRunner((documents, errorMessage) -> {
final IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
() -> client.beginAnalyzeActions(documents,
new TextAnalyticsActions().setRecognizeEntitiesActions(new RecognizeEntitiesAction()),
null, Context.NONE)
.getFinalResult());
assertEquals(errorMessage, exception.getMessage());
});
}
// Runs beginAnalyzeActions with only an entity-recognition action and validates that the
// recognize-entities stream is populated while every other action stream is empty.
// NOTE(review): the @MethodSource string literal is truncated in this copy of the file
// (unterminated quote) — restore from the canonical source before compiling.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeEntitiesRecognitionRunner(
(documents, tasks) -> {
// Kick off the LRO with statistics disabled and wait for it to finish.
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> poller =
setPollInterval(client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE));
poller.waitForCompletion();
AnalyzeActionsResultPagedIterable finalResult = poller.getFinalResult();
// Only the recognize-entities action stream carries a result; all others are empty.
IterableStream<RecognizeEntitiesActionResult> expectedEntitiesStream =
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null)));
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
expectedEntitiesStream,
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
finalResult.stream().collect(Collectors.toList()));
}
);
}
// Runs beginAnalyzeActions with a PII action restricted by category filters and checks
// that only the PII action stream is populated, against the categories-filter expected set.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}
);
}
// Runs beginAnalyzeActions with a PII action restricted by domain filter and checks that
// only the PII action stream is populated, against the domain-filter expected set.
// NOTE(review): the @Disabled reason URL and @MethodSource string literal are truncated
// in this copy of the file — restore from the canonical source.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzePiiEntityRecognitionWithDomainFiltersRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}
);
}
// Runs beginAnalyzeActions with only a linked-entities action (language "en") and checks
// that only the linked-entities stream is populated. Disabled: action not working.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
// Runs beginAnalyzeActions with only a key-phrase extraction action (language "en") and
// checks that only the key-phrases stream is populated.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractKeyPhrasesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
// Runs beginAnalyzeActions with only a sentiment-analysis action (language "en") and
// checks that only the sentiment stream is populated.
// NOTE(review): @MethodSource string literal truncated in this copy — verify upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeSentimentRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getExpectedBatchTextSentiment(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
// Action test: runs beginAnalyzeActions with a healthcare-entities task (null options)
// and checks that only the healthcare slot of the expected batch result is populated,
// with two expected document results.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeHealthcareEntitiesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
asList(
getRecognizeHealthcareEntitiesResult1("0"),
getRecognizeHealthcareEntitiesResult2())),
null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
// Action test (currently disabled): runs a custom-entities recognition action and
// validates each returned categorized entity.
// NOTE(review): the @Disabled reason URL appears truncated in this copy of the file —
// confirm the tracking-issue link upstream.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
// Validate every entity of every document of every custom-entities action result.
actionsResults.forEach(
actionsResult -> actionsResult.getRecognizeCustomEntitiesResults().forEach(
customEntitiesActionResult -> customEntitiesActionResult.getDocumentsResults().forEach(
documentResult -> validateCategorizedEntities(
documentResult.getEntities().stream().collect(Collectors.toList())))));
});
}
// Action test (currently disabled): runs a single-label classification action and
// validates each document's classification result.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getSingleLabelClassifyResults().forEach(
customSingleCategoryActionResult -> customSingleCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
// Action test (currently disabled): runs a multi-label classification action and
// validates each document's classification result.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiCategoryClassifyAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getMultiLabelClassifyResults().forEach(
customMultiCategoryActionResult -> customMultiCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
// Disabled test: beginRecognizeCustomEntities with plain string inputs; validates the
// categorized entities of every document in the paged result.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesRunner((documents, parameters) -> {
// parameters.get(0)/get(1) are the project name and deployment name supplied by the runner.
SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult ->
validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
});
}
// Disabled test: beginRecognizeCustomEntities with explicit options; additionally checks
// that the display name set on the options round-trips through the poll response.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesRunner((documents, parameters) -> {
RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
.setDisplayName("operationName");
SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<RecognizeCustomEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
// The service should echo back the operation display name we set above.
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult ->
validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
});
}
// Disabled test: beginSingleLabelClassify with plain string inputs; validates each
// document's classification result.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
// Disabled test: beginSingleLabelClassify with options; also asserts the display name
// set on the options is echoed in the poll response.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
// Disabled test: beginMultiLabelClassify with plain string inputs; validates each
// document's classification result.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
// Disabled test: beginMultiLabelClassify with options; also asserts the display name
// set on the options is echoed in the poll response.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
// Action test: extractive-summary action with default parameters (runner called with
// null maxSentenceCount and null order); only the extractive-summary slot of the
// expected batch result is populated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
TIME_NOW,
getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
null))),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}, null, null);
}
// Action test: extractive summary requested with OFFSET ordering (4 sentences);
// asserts each document's sentences come back in ascending offset order.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isAscendingOrderByOffSet(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
// Action test: extractive summary requested with RANK ordering (4 sentences);
// asserts each document's sentences come back in descending rank-score order.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
// Action test: maxSentenceCount = 20; asserts every document returns fewer than 20
// sentences (the documents are shorter than the cap).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
}, 20, null);
}
// Action test: maxSentenceCount = 5; asserts every document returns exactly 5 sentences.
// FIX: assertEquals takes (expected, actual) — the original call passed the actual size
// first, which produces misleading "expected X but was 5" failure messages.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertEquals(5,
                        documentResult.getSentences().stream().collect(Collectors.toList()).size()))));
    }, 5, null);
}
// Negative action test: maxSentenceCount values outside the valid range (0 and 21)
// must make the operation fail with an InvalidParameterValue service error.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryActionRunner(
(documents, tasks) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
// getFinalResult() is what surfaces the service error; the value itself is unused.
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
});
assertEquals(
INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
// Action test: abstractive-summary action with default parameters (runner called with
// null sentence count); only the abstractive-summary slot (last) of the expected
// batch result is populated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
TIME_NOW,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
null
)))
)),
result.stream().collect(Collectors.toList()));
}, null);
}
// Negative test: duplicate document ids in the input must be rejected with HTTP 400.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
// Disabled negative test: an empty document id must yield HTTP 400 with the
// InvalidDocument service error code.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
// Disabled negative test: exceeding the per-request document limit must yield HTTP 400
// with the InvalidParameterValue service error code.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, null).getFinalResult());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_PARAMETER_VALUE, textAnalyticsError.getErrorCode());
});
}
// Happy-path test: beginAbstractSummary with plain string inputs; every result
// collection is compared against the expected abstractive summary.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
client.beginAbstractSummary(documents);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
// Happy-path test: the maximal beginAbstractSummary overload (documents + options +
// Context); validates results the same way as the string-input variant.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryMaxOverloadRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
client.beginAbstractSummary(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
// Happy-path test: beginExtractSummary with OFFSET ordering (4 sentences); asserts
// each document's sentences are in ascending offset order.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
// Happy-path test: beginExtractSummary with RANK ordering (4 sentences); asserts
// each document's sentences are in descending rank-score order.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
// Happy-path test: maxSentenceCount = 20; asserts every document returns fewer than
// 20 sentences (documents are shorter than the cap).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
}, 20, null);
}
// Happy-path test: maxSentenceCount = 5; asserts every document returns exactly
// 5 sentences.
// FIX: assertEquals takes (expected, actual) — the original call passed the actual size
// first, which produces misleading "expected X but was 5" failure messages.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractiveSummaryRunner((documents, options) -> {
        SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
            client.beginExtractSummary(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
        result.stream().collect(Collectors.toList()).forEach(
            documentResultCollection -> documentResultCollection.forEach(
                documentResult -> assertEquals(5,
                    documentResult.getSentences().stream().collect(Collectors.toList()).size())));
    }, 5, null);
}
// Negative test: maxSentenceCount values outside the valid range (0 and 21) must make
// beginExtractSummary fail with the InvalidParameterValue service error.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryRunner(
(documents, options) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
// getFinalResult() is what surfaces the service error; the value itself is unused.
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
});
assertEquals(
INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
}

class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase {

    // NOTE(review): this region was garbled in extraction (stray "|" column separators,
    // a duplicated copy of this method body, a review remark fused into the code, and
    // the class header fused onto the last line). Reconstructed to a single annotated
    // method placed inside the async test class; confirm against the upstream file.
    // The fused review remark read: "There is a test regression that Linked Entities
    // Task doesn't work" — hence the linked-entities slot below is IterableStream.of(null).
    @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
    @MethodSource("com.azure.ai.textanalytics.TestUtils#getTestParameters")
    public void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
        client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
        analyzeActionsStringInputRunner((documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                        TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW,
                        getRecognizePiiEntitiesResultCollection(), null))),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                        TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                    IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                        TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        });
    }
// Upper bound applied to every StepVerifier verification in this test class.
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(30);
// Async client under test; re-created per test for the requested service version.
private TextAnalyticsAsyncClient client;
// Wraps the given HttpClient so the test fails if any request is issued through a
// blocking (sync) code path — the async client must stay fully asynchronous.
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
    AssertingHttpClientBuilder assertingBuilder = new AssertingHttpClientBuilder(httpClient);
    return assertingBuilder.assertAsync().build();
}
// Builds the async client under test. In playback mode the recorded-response client
// from the interceptor manager replaces the live HTTP client; either way the client
// is wrapped with the async-asserting decorator.
private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
    HttpClient baseClient = interceptorManager.isPlaybackMode()
        ? interceptorManager.getPlaybackClient()
        : httpClient;
    return getTextAnalyticsClientBuilder(buildAsyncAssertingClient(baseClient), serviceVersion, isStaticResource)
        .buildAsyncClient();
}
/**
* Verify that we can get statistics on the collection result when given a batch of documents with request options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Reactive assertion: the response must carry statistics (first flag true) and HTTP 200.
detectLanguageShowStatisticsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.assertNext(response ->
validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each {@code DetectLanguageResult} input of a batch.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Null options: statistics are not requested, so validation runs with the flag false.
detectLanguageRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.assertNext(response ->
validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch with given country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// String-list overload with an explicit country hint supplied by the runner; no request options.
detectLanguagesCountryHintRunner((inputs, countryHint) ->
StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null))
.assertNext(actualResults ->
validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch with request options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// String-list overload with null country hint but runner-supplied options; validator checks statistics (true).
detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, options))
.assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Plain string-list overload: both country hint and options are null.
detectLanguageStringInputRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, null))
.assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that a single DetectedLanguage is returned for a document to detectLanguage.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Single-document overload; expects the English detected-language fixture.
detectSingleTextLanguageRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an TextAnalyticsException is thrown for a document with invalid country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Invalid country hint must surface as TextAnalyticsException with error code INVALID_COUNTRY_HINT.
detectLanguageInvalidCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_COUNTRY_HINT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// An empty document must fail with TextAnalyticsException carrying error code INVALID_DOCUMENT.
emptyTextRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that a bad request exception is returned for input documents with same ids.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Duplicate document IDs in the batch must fail with HttpResponseException.
detectLanguageDuplicateIdRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// An empty document ID must fail with HTTP 400 and service error code INVALID_DOCUMENT.
detectLanguageInputEmptyIdRunner(inputs ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that with countryHint with empty string will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Empty-string country hint must not throw; the Spanish fixture is expected back.
detectLanguageEmptyCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that with countryHint with "none" will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// Country hint "none" must not throw; the Spanish fixture is expected back.
detectLanguageNoneCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies categorized entities are recognized for a single text input. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.assertNext(response -> validateCategorizedEntities(response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies entity recognition on an empty document fails with error code INVALID_DOCUMENT. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
/** Verifies duplicate document IDs in an entity-recognition batch fail with HttpResponseException. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/** Verifies an empty document ID fails with HTTP 400 and service error code INVALID_DOCUMENT. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/** Verifies that a per-document error in a batch makes getEntities() throw with the batch-error message. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitySingleErrorRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch entity recognition returns the expected categorized entities with HTTP 200. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    recognizeBatchCategorizedEntityRunner((inputs) ->
        StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
            .assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response))
            // Bounded wait like the rest of this suite; the previous verifyComplete() had no timeout
            // and could hang the build indefinitely if the publisher never completes.
            .expectComplete()
            .verify(DEFAULT_TIMEOUT));
}
/** Verifies batch entity recognition with request options; validator runs with showStatistics=true. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch entity recognition for plain string inputs (no language hint, no options). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntityStringInputRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch entity recognition for string inputs with a runner-supplied language hint. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch entity recognition for string inputs with options; validator checks statistics (true). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options))
.assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies an oversized batch fails with HTTP 400 and error code INVALID_DOCUMENT_BATCH. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/** Verifies entity offset/length (13/9) for a document containing an emoji. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
// NOTE(review): @Disabled issue URL is truncated in this copy; test body is intentionally empty while disabled.
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
/** Verifies entity offset/length (15/9) when the emoji carries a skin-tone modifier. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
/** Verifies entity offset/length (22/9) when the document contains a family emoji (multi-codepoint grapheme). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document ->
        StepVerifier.create(client.recognizeEntities(document))
            .assertNext(result -> result.forEach(categorizedEntity -> {
                assertEquals(9, categorizedEntity.getLength());
                assertEquals(22, categorizedEntity.getOffset());
            }))
            // Bounded wait for consistency with the rest of the suite; the previous
            // verifyComplete() had no timeout and could hang indefinitely.
            .expectComplete()
            .verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
    );
}
/** Verifies entity offset/length (30/9) for a family emoji with skin-tone modifiers. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(30, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
/** Verifies entity offset/length (14/9) for NFC-normalized diacritics in the document. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(14, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
/** Verifies entity offset/length (15/9) for NFD-normalized diacritics (decomposed form shifts the offset). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
/** Verifies entity offset/length (13/9) for Korean text in NFC normalization. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
/** Verifies entity offset/length (13/9) for Korean text in NFD normalization (same offsets as NFC here). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
/** Verifies entity offset/length (126/9) when the document contains zalgo (heavily combining-mark) text. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(126, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
/** Verifies PII entities are recognized for a single text input against the fixture list. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiSingleDocumentRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies PII recognition on an empty document fails with error code INVALID_DOCUMENT. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/** Verifies duplicate document IDs in a PII batch fail with HttpResponseException. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/** Verifies an empty document ID in a PII batch fails with HTTP 400 and error code INVALID_DOCUMENT. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/** Verifies a per-document error in a PII batch makes getEntities() throw with the batch-error message. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitySingleErrorRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch PII recognition returns the expected entities with HTTP 200. Disabled pending linked issue. */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch PII recognition with options; validator runs with showStatistics=true. Disabled pending linked issue. */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch PII recognition for string inputs with a language hint. Disabled pending linked issue. */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null))
.assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies batch PII recognition for string inputs with options; statistics validated. Disabled pending linked issue. */
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options))
.assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/** Verifies an oversized PII batch fails with HTTP 400 and error code INVALID_DOCUMENT_BATCH. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/** Verifies PII entity offset/length (8/11) for a document containing an emoji. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (10/11) when the emoji carries a skin-tone modifier. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (17/11) for a family emoji (multi-codepoint grapheme). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(17, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (25/11) for a family emoji with skin-tone modifiers. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(25, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (9/11) for NFC-normalized diacritics. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(9, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (10/11) for NFD-normalized diacritics (decomposed form shifts the offset). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (8/11) for Korean text in NFC normalization. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (8/11) for Korean text in NFD normalization (same offsets as NFC here). */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
/** Verifies PII entity offset/length (121/11) when the document contains zalgo (combining-mark heavy) text. */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(121, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiDomainFilterRunner((document, options) ->
StepVerifier.create(client.recognizePiiEntities(document, "en", options))
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1ForDomainFilter(),
response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
.assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
.assertNext(
resultCollection -> validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
List<PiiEntityCategory> categories = new ArrayList<>();
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
.assertNext(
resultCollection -> {
resultCollection.forEach(result -> result.getEntities().forEach(piiEntity -> {
final PiiEntityCategory category = piiEntity.getCategory();
if (PiiEntityCategory.ABA_ROUTING_NUMBER == category
|| PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == category) {
categories.add(category);
}
}));
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
final PiiEntityCategory[] piiEntityCategories = categories.toArray(new PiiEntityCategory[categories.size()]);
options.setCategoriesFilter(piiEntityCategories);
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
.assertNext(
resultCollection -> validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedEntitiesForSingleTextInputRunner(input ->
StepVerifier.create(client.recognizeLinkedEntities(input))
.assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next()))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.recognizeLinkedEntities(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntityRunner((inputs) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false,
getExpectedBatchLinkedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedStringInputRunner((inputs) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
.assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null))
.assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options))
.assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(22, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(30, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(14, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(126, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesForSingleTextInputRunner(input ->
StepVerifier.create(client.extractKeyPhrases(input))
.assertNext(keyPhrasesCollection -> validateKeyPhrases(asList("monde"),
keyPhrasesCollection.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.extractKeyPhrases(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesRunner((inputs) ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options))
.assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesStringInputRunner((inputs) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
* Test analyzing sentiment for a string input.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
* Test analyzing sentiment for a string input with default language hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input, null))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
* Test analyzing sentiment for a string input and verifying the result of opinion mining.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForTextInputWithOpinionMiningRunner((input, options) ->
StepVerifier.create(client.analyzeSentiment(input, "en", options))
.assertNext(response -> validateDocumentSentiment(true, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(document ->
StepVerifier.create(client.analyzeSentiment(document))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
/**
* Test analyzing sentiment for a duplicate ID list.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, new TextAnalyticsRequestOptions()))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* String documents with null TextAnalyticsRequestOptions and null language code which will use the default language
* code, 'en'.
*
* {@link TextAnalyticsAsyncClient
* which TextAnalyticsRequestOptions is null and null language code which will use the default language code, 'en'.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentStringInputRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false,
getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* String documents with null TextAnalyticsRequestOptions and given a language code.
*
* {@link TextAnalyticsAsyncClient
* which TextAnalyticsRequestOptions is null and given a language code.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, language, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes request statistics but not sentence options when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which to show the request statistics only and verify the analyzed sentiment result.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes sentence options but not request statistics when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) -> {
options.setIncludeStatistics(false);
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
/**
* Verify that the collection result includes sentence options and request statistics when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* TextDocumentInput documents with null TextAnalyticsRequestOptions.
*
* {@link TextAnalyticsAsyncClient
* which TextAnalyticsRequestOptions is null.
*/
// Null TextAnalyticsRequestOptions: result must contain neither statistics nor
// sentence opinions (validator args false, false) and return HTTP 200.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentRunner(inputs ->
// Cast disambiguates the overload taking TextAnalyticsRequestOptions.
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (TextAnalyticsRequestOptions) null))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that we can get statistics on the collection result when given a batch of
* TextDocumentInput documents with TextAnalyticsRequestOptions.
*
* {@link TextAnalyticsAsyncClient},
* which TextAnalyticsRequestOptions includes request statistics.
*/
// TextDocumentInput batch with request statistics enabled (no opinion mining):
// validator args (true, false) and an expected HTTP 200 response.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentShowStatsRunner((inputs, requestOptions) -> {
        StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, requestOptions))
            .assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
            .expectComplete()
            .verify(DEFAULT_TIMEOUT);
    });
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* TextDocumentInput documents with null AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient},
* which AnalyzeSentimentOptions is null.
*/
// Null AnalyzeSentimentOptions: the runner-supplied options are deliberately ignored
// and null is passed instead; result must have no statistics and no opinions.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
// Cast disambiguates the overload taking AnalyzeSentimentOptions.
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (AnalyzeSentimentOptions) null))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes request statistics but not sentence options when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient},
* which AnalyzeSentimentOptions includes request statistics but not opinion mining.
*/
// Statistics on, opinion mining explicitly switched off inline on the runner's options;
// validator args (true, false).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes sentence options but not request statistics when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient},
* which AnalyzeSentimentOptions includes opinion mining but not request statistics.
*/
// Opinion mining on, statistics explicitly off; validator args (false, true).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) -> {
options.setIncludeStatistics(false);
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response ->
validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
/**
* Verify that the collection result includes sentence options and request statistics when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient},
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
// Both statistics and opinion mining enabled (runner defaults used as-is);
// validator args (true, true).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents.
*/
// Over-sized batch must be rejected: expects HttpResponseException with HTTP 400
// and service error code INVALID_DOCUMENT_BATCH.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
// Unchecked cast: any non-HttpResponseException failure surfaces as a ClassCastException here.
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Offset/length test for a document containing an emoji (SENTIMENT_OFFSET_INPUT).
// NOTE(review): the expected counts (sentence 25/0, assessment 7/17, target 5/7)
// presumably reflect UTF-16 code-unit counting — confirm against the service docs.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(opinionSentiment -> {
assertEquals(7, opinionSentiment.getLength());
assertEquals(17, opinionSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: emoji + skin-tone modifier shifts all offsets by 2 vs. the
// plain-emoji test (sentence 27, assessment offset 19, target offset 9).
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: multi-codepoint emoji family (ZWJ sequence) — sentence length 34,
// assessment offset 26, target offset 16.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(34, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(26, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(16, targetSentiment.getOffset());
});
})
)
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: emoji family with skin-tone modifiers — sentence length 42,
// assessment offset 34, target offset 24.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(42, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(34, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(24, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: NFC-normalized diacritics — sentence length 26,
// assessment offset 18, target offset 8.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(26, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(18, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(8, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: NFD-decomposed diacritics add one combining mark vs. NFC —
// sentence length 27, assessment offset 19, target offset 9.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: Korean NFC text — sentence length 25, assessment offset 17,
// target offset 7 (same expectations as the NFD variant below).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: Korean NFD text — expectations identical to the NFC variant
// (sentence 25, assessment 7/17, target 5/7).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Offset/length test: zalgo text (heavy combining-character stacks) — sentence
// length 138, assessment offset 130, target offset 120.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(138, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(130, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(120, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
// Healthcare LRO happy path with String inputs and the no-options overload;
// blocks on the sync poller, then validates the single-page result (no statistics).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, dummyOptions) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents).getSyncPoller();
// Shorten the poll interval for playback/live test speed.
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
false,
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// Healthcare LRO with options: also verifies the operation displayName round-trips,
// but only on API versions newer than v3.0/v3.1 where displayName is supported.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// Same flow as healthcareStringInputWithOptions, exercising the TextDocumentInput
// ("max") overload of beginAnalyzeHealthcareEntities instead of the String one.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
// Pagination: runner submits 10 documents so the LRO result spans multiple pages;
// expected results are built for pages starting at 0 with page size 10.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroPaginationRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
}, 10);
}
// Empty document list must fail fast client-side with IllegalArgumentException
// carrying the runner-provided message — no service call is expected to succeed.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyListRunner((documents, errorMessage) -> {
StepVerifier.create(client.beginAnalyzeHealthcareEntities(documents, null))
.expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException
&& errorMessage.equals(throwable.getMessage()))
.verify(DEFAULT_TIMEOUT);
});
}
// Disabled placeholder: body only builds the client and asserts nothing.
// NOTE(review): either implement the code-point offset checks or remove the stub.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
// Healthcare entity offset test for an emoji-prefixed document: every entity in
// every page must have length 11 and offset 20.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: emoji + skin-tone modifier shifts the expected
// entity offset to 22 (length stays 11).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: emoji family (ZWJ sequence) — expected offset 29.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(29, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: emoji family with skin-tone modifiers — expected offset 37.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(37, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: NFC-normalized diacritics — expected offset 21.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(21, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: NFD-decomposed diacritics — expected offset 22
// (one combining mark more than NFC).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: Korean NFC text — expected offset 20.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: Korean NFD text — expectations identical to NFC (offset 20).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Healthcare entity offset test: zalgo text — expected offset 133.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedFlux.toStream().forEach(result -> result.forEach(
entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(133, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies assertion detection: the second entity of the first document's first page
// must be HYPOTHETICAL with no association/certainty set.
// NOTE(review): navigation by hard-coded indices (.get(0)...get(1)) is fragile if the
// recorded fixture ever changes order.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
// page 0 -> document result 0 -> entity 1 -> its assertion
final HealthcareEntityAssertion assertion =
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList())
.get(0).stream().collect(Collectors.toList())
.get(0).getEntities().stream().collect(Collectors.toList())
.get(1)
.getAssertion();
assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
assertNull(assertion.getAssociation());
assertNull(assertion.getCertainty());
});
}
// Cancels an in-flight healthcare LRO and polls until USER_CANCELLED is observed.
// NOTE(review): the while-loop has no timeout — if the service never reports
// USER_CANCELLED this spins forever. Currently @Disabled; add a deadline before re-enabling.
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
cancelHealthcareLroRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.cancelOperation();
LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
operationStatus = syncPoller.poll().getStatus();
}
syncPoller.waitForCompletion();
Assertions.assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
});
}
// NOTE(review): this method previously carried a duplicated
// @ParameterizedTest/@MethodSource annotation pair. Neither annotation is
// @Repeatable, so the duplicate was a compile error — a single pair is kept.
// The @MethodSource value looks truncated (extraction artifact); confirm the
// full "...TestUtils#..." reference upstream.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs the batch-actions LRO with statistics disabled and validates the
// expected per-action results.
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeBatchActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks,
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Eight positional slots: only entities / PII / key-phrases / sentiment
        // are expected to carry results; the rest stay empty.
        validateAnalyzeBatchActionsResultList(false, false,
            Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                    TIME_NOW,
                    getRecognizePiiEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits two actions of each kind and asserts every action kind yields
// exactly two action results.
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, null).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = poller.getFinalResult();
        for (AnalyzeActionsResult actionsResult : result.toStream().collect(Collectors.toList())) {
            assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
            assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
        }
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies that a caller-supplied action name (CUSTOM_ACTION_NAME) round-trips
// onto every kind of action result.
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeActionsWithActionNamesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, null).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
        // Each action kind's first result must carry the custom action name.
        actionsResults.forEach(actionsResult -> {
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
        });
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits 22 documents so the service pages the results (page size 20) and
// validates the expected multi-page result list.
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeBatchActionsPaginationRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux>
            syncPoller = client.beginAnalyzeActions(
                documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
            result.toStream().collect(Collectors.toList()));
    }, 22);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// An empty document list must be rejected client-side with an
// IllegalArgumentException carrying the runner-supplied message.
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emptyListRunner((documents, errorMessage) -> {
        TextAnalyticsActions actions = new TextAnalyticsActions()
            .setRecognizeEntitiesActions(new RecognizeEntitiesAction());
        StepVerifier.create(client.beginAnalyzeActions(documents, actions, null))
            .expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException
                && errorMessage.equals(throwable.getMessage()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a batch-actions LRO containing only an entities-recognition action and
// validates that only the first (entities) slot carries results.
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeEntitiesRecognitionRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // Eight positional slots; order is significant.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                        TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a PII-recognition action with category filters and validates that only
// the PII slot carries results.
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // Eight positional slots; only the third (PII) is populated.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a PII-recognition action with a domain filter (e.g. PHI) and validates
// that only the PII slot carries results.
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithDomainFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // Eight positional slots; only the third (PII) is populated.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a linked-entity-recognition action ("en") and validates that only the
// second (linked entities) slot carries results.
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Eight positional slots; only the second (linked entities) is populated.
        validateAnalyzeBatchActionsResultList(
            false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a key-phrase-extraction action ("en") and validates that only the
// fifth (key phrases) slot carries results.
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractKeyPhrasesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Eight positional slots; only the fifth (key phrases) is populated.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a sentiment-analysis action ("en") and validates that only the sixth
// (sentiment) slot carries results.
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeSentimentRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Eight positional slots; only the sixth (sentiment) is populated.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getExpectedBatchTextSentiment(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a healthcare-entities action ("en") and validates that only the fourth
// (healthcare) slot carries results — two document results expected.
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Eight positional slots; only the fourth (healthcare) is populated.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
                    getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
                        asList(
                            getRecognizeHealthcareEntitiesResult1("0"),
                            getRecognizeHealthcareEntitiesResult2())),
                    null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a custom-entity-recognition action and validates the entities of every
// document in every custom action result.
public void recognizeCustomEntitiesAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = poller.getFinalResult();
        for (AnalyzeActionsResult actionsResult : result.toStream().collect(Collectors.toList())) {
            actionsResult.getRecognizeCustomEntitiesResults().forEach(
                customEntitiesActionResult -> customEntitiesActionResult.getDocumentsResults().forEach(
                    documentResult -> validateCategorizedEntities(
                        documentResult.getEntities().stream().collect(Collectors.toList()))));
        }
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a single-label-classification action and validates every document's
// classification result.
public void singleLabelClassificationAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getSingleLabelClassifyResults().forEach(
                customSingleCategoryActionResult -> customSingleCategoryActionResult.getDocumentsResults().forEach(
                    documentResult -> validateLabelClassificationResult(documentResult))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs a multi-label-classification action and validates every document's
// classification result.
public void multiCategoryClassifyAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getMultiLabelClassifyResults().forEach(
                customMultiCategoryActionResult -> customMultiCategoryActionResult.getDocumentsResults().forEach(
                    documentResult -> validateLabelClassificationResult(documentResult))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Custom entity recognition over plain-string documents; parameters.get(0/1)
// are the project/deployment names supplied by the runner.
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> syncPoller =
            client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        RecognizeCustomEntitiesPagedFlux pagedFlux = syncPoller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(documentResult ->
                validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Custom entity recognition with options; additionally asserts the display
// name set on the options round-trips onto the operation detail.
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
            .setDisplayName("operationName");
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> syncPoller =
            client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1), "en", options)
                .getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        PollResponse<RecognizeCustomEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        RecognizeCustomEntitiesPagedFlux pagedFlux = syncPoller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(documentResult ->
                validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Single-label classification over plain-string documents; parameters.get(0/1)
// are the project/deployment names supplied by the runner.
public void singleLabelClassificationStringInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
            client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1))
                .getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Single-label classification with options; additionally asserts the display
// name set on the options round-trips onto the operation detail.
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
            client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options)
                .getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Multi-label classification over plain-string documents; parameters.get(0/1)
// are the project/deployment names supplied by the runner.
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomMultiLabelRunner((documents, parameters) -> {
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> poller =
            setPollInterval(client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1))
                .getSyncPoller());
        poller.waitForCompletion();
        ClassifyDocumentPagedFlux pagedFlux = poller.getFinalResult();
        // Validate every document result on every page.
        pagedFlux.toStream().forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Multi-label classification with options; additionally asserts the display
// name set on the options round-trips onto the operation detail.
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomMultiLabelRunner((documents, parameters) -> {
        MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
            client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options)
                .getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Extractive summarization with default sentence count / order (both nulls
// passed to the runner); only the seventh (extractive summary) slot populates.
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
                    TIME_NOW,
                    getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
                    null))),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    }, null, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Requests 4 summary sentences ordered by OFFSET and asserts each document's
// sentences come back in ascending offset order.
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        result.toStream().collect(Collectors.toList()).forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(isAscendingOrderByOffSet(
                        documentResult.getSentences().stream().collect(Collectors.toList()))))));
    }, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Requests 4 summary sentences ordered by RANK and asserts each document's
// sentences come back in descending rank-score order.
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        result.toStream().collect(Collectors.toList()).forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(isDescendingOrderByRankScore(
                        documentResult.getSentences().stream().collect(Collectors.toList()))))));
    }, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// With maxSentenceCount = 20, documents with fewer sentences must return
// fewer than 20 summary sentences (the count is an upper bound).
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        result.toStream().collect(Collectors.toList()).forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    documentResult -> assertTrue(
                        documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
    }, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// With maxSentenceCount = 5, each document result must contain exactly 5
// summary sentences.
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
        actionsResults.forEach(
            actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
                extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
                    // FIX: assertEquals takes (expected, actual) — previously the
                    // arguments were swapped, which produces misleading failure
                    // messages ("expected <N> but was <5>").
                    documentResult -> assertEquals(5,
                        documentResult.getSentences().stream().collect(Collectors.toList()).size()))));
    }, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// maxSentenceCount outside the valid range (here 0 and 21) must surface an
// HttpResponseException with error code INVALID_PARAMETER_VALUE.
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    int[] invalidMaxSentenceCounts = {0, 21};
    for (int invalidCount: invalidMaxSentenceCounts) {
        extractiveSummaryActionRunner(
            (documents, tasks) -> {
                HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
                    SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                        client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions())
                            .getSyncPoller();
                    syncPoller = setPollInterval(syncPoller);
                    syncPoller.waitForCompletion();
                    // The service rejects the invalid count; getFinalResult() is
                    // expected to throw. (Removed the unused local that previously
                    // captured this never-produced result.)
                    syncPoller.getFinalResult();
                });
                assertEquals(
                    TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
                    ((TextAnalyticsError) exception.getValue()).getErrorCode());
            }, invalidCount, null);
    }
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Abstractive summarization with default parameters (null passed to the
// runner); only the eighth (abstractive summary) slot populates.
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    abstractiveSummaryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
                    TIME_NOW,
                    new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
                    null
                )))
            )),
            result.toStream().collect(Collectors.toList()));
    }, null);
}
// Duplicate document IDs in one batch must be rejected by the service with an
// HttpResponseException.
// NOTE(review): @MethodSource/@Disabled string literals in this region appear truncated
// by extraction (unterminated literals) — TODO confirm against the original sources.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT);
});
}
// An empty document ID must fail with HTTP 400 / InvalidDocument.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
// Exceeding the per-request document limit must fail with HTTP 400 / InvalidDocumentBatch.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
// Happy path: beginAbstractSummary over plain string documents with the default overload;
// each returned collection is validated against the expected abstractive summary result.
// NOTE(review): @MethodSource string literals in this region appear truncated by
// extraction (unterminated literals) — TODO confirm against the original sources.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
// Same happy path, but exercises the maximal overload taking explicit options.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryMaxOverloadRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents, options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
// Extractive summary with OFFSET ordering: sentences must come back in ascending
// offset order for every document.
// NOTE(review): @MethodSource string literals in this region appear truncated by
// extraction (unterminated literals) — TODO confirm against the original sources.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
// Extractive summary with RANK ordering: sentences must come back in descending
// rank-score order for every document.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isDescendingOrderByRankScore(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
// With maxSentenceCount = 20, each document's extractive summary must contain fewer than
// 20 sentences (the sample documents are shorter than the cap).
// NOTE(review): the @MethodSource string below appears truncated (unterminated literal) —
// presumably "com.azure.ai.textanalytics.TestUtils#getTestParameters"; TODO confirm.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
}, 20, null);
}
// Verifies a non-default maxSentenceCount (5) is honored: every document's extractive
// summary contains exactly 5 sentences.
// NOTE(review): the @MethodSource string below appears truncated (unterminated literal) —
// presumably "com.azure.ai.textanalytics.TestUtils#getTestParameters"; TODO confirm.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
// JUnit convention: expected value first, actual second (arguments were reversed,
// which produced misleading failure messages).
documentResult -> assertEquals(
5, documentResult.getSentences().stream().collect(Collectors.toList()).size())));
}, 5, null);
}
// Verifies that maxSentenceCount values outside the supported range cause the
// extract-summary LRO to fail with InvalidParameterValue.
// NOTE(review): the @MethodSource string below appears truncated (unterminated literal) —
// presumably "com.azure.ai.textanalytics.TestUtils#getTestParameters"; TODO confirm.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
// 0 is below the minimum; 21 is above the maximum accepted sentence count.
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryRunner(
(documents, options) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
// getFinalResult() is what surfaces the operation failure; the returned
// value itself is not needed (was assigned to an unused local).
syncPoller.getFinalResult();
});
assertEquals(
TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
}
class TextAnalyticsAsyncClientTest extends TextAnalyticsClientTestBase {
// Upper bound for every reactive StepVerifier wait so a hung pipeline fails fast.
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(30);
// Client under test; re-created per test from the parameterized HttpClient/service version.
private TextAnalyticsAsyncClient client;
// Wraps the given HttpClient so the test framework asserts async-only usage.
private HttpClient buildAsyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertAsync()
.build();
}
// Builds a TextAnalyticsAsyncClient for the given HTTP client / service version,
// substituting the recorded playback client when the interceptor runs in playback mode.
private TextAnalyticsAsyncClient getTextAnalyticsAsyncClient(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
return getTextAnalyticsClientBuilder(
buildAsyncAssertingClient(interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient),
serviceVersion,
isStaticResource)
.buildAsyncClient();
}
// NOTE(review): @MethodSource string literals in this region appear truncated by
// extraction (unterminated literals) — TODO confirm against the original sources.
/**
* Verify that we can get statistics on the collection result when given a batch of documents with request options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageShowStatisticsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.assertNext(response ->
validateDetectLanguageResultCollectionWithResponse(true, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each {@code DetectLanguageResult} input of a batch.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.assertNext(response ->
validateDetectLanguageResultCollectionWithResponse(false, getExpectedBatchDetectedLanguages(),
200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch with given country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguagesCountryHintRunner((inputs, countryHint) ->
StepVerifier.create(client.detectLanguageBatch(inputs, countryHint, null))
.assertNext(actualResults ->
validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), actualResults))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch with request options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, options))
.assertNext(response -> validateDetectLanguageResultCollection(true, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Test to detect language for each string input of batch.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageStringInputRunner((inputs) ->
StepVerifier.create(client.detectLanguageBatch(inputs, null, null))
.assertNext(response -> validateDetectLanguageResultCollection(false, getExpectedBatchDetectedLanguages(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// NOTE(review): @MethodSource string literals in this region appear truncated by
// extraction (unterminated literals) — TODO confirm against the original sources.
/**
* Verifies that a single DetectedLanguage is returned for a document to detectLanguage.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectSingleTextLanguageRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageEnglish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an TextAnalyticsException is thrown for a document with invalid country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageInvalidCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_COUNTRY_HINT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.detectLanguage(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that a bad request exception is returned for input documents with same ids.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageDuplicateIdRunner((inputs, options) ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, options))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageInputEmptyIdRunner(inputs ->
StepVerifier.create(client.detectLanguageBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that with countryHint with empty string will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageEmptyCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that with countryHint with "none" will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
detectLanguageNoneCountryHintRunner((input, countryHint) ->
StepVerifier.create(client.detectLanguage(input, countryHint))
.assertNext(response -> validatePrimaryLanguage(getDetectedLanguageSpanish(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Single-string input: entities for one document validated against the expected list.
// NOTE(review): @MethodSource string literals in this region appear truncated by
// extraction (unterminated literals) — TODO confirm against the original sources.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.assertNext(response -> validateCategorizedEntities(response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// An empty document must error with InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.recognizeEntities(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
// Duplicate document IDs in one batch must be rejected with an HttpResponseException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
// An empty document ID must fail with HTTP 400 / InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Batch containing one bad document: accessing that document's entities must throw.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitySingleErrorRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizeEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Batch-input happy path: validates the full result collection on the 200 response.
// NOTE(review): the @MethodSource string below appears truncated (unterminated literal).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntityRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200, response))
// Bounded wait like every other test in this class; the previous verifyComplete()
// had no timeout and could block indefinitely on a hung pipeline.
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Batch input with statistics enabled; validates statistics on the 200 response.
// NOTE(review): @MethodSource string literals in this region appear truncated by
// extraction (unterminated literals) — TODO confirm against the original sources.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Batch of plain strings with default language and options.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntityStringInputRunner((inputs) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Batch of plain strings with an explicit language hint.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, language, null))
.assertNext(response -> validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Batch of plain strings with request options (statistics enabled).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, options))
.assertNext(response -> validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// Exceeding the per-request document limit must fail with HTTP 400 / InvalidDocumentBatch.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizeEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Emoji in the document: asserts the entity offset/length the service reports.
// The expected offsets look like UTF-16 code-unit indices — TODO confirm against the
// SDK's configured string-index type.
// NOTE(review): @MethodSource/@Disabled string literals in this region appear truncated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Disabled placeholder: body intentionally empty pending the linked issue.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
// Emoji with a skin-tone modifier shifts the expected offset further (15).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Emoji-family sequence: asserts the reported entity offset (22) and length.
// NOTE(review): the @MethodSource string below appears truncated (unterminated literal).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(22, categorizedEntity.getOffset());
}))
// Bounded wait for consistency with the rest of the class; the previous
// verifyComplete() had no timeout and could block indefinitely.
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Text-index edge cases: each runner feeds a document containing the named Unicode
// construct and asserts the entity offset/length the service reports. Expected offsets
// look like UTF-16 code-unit indices — TODO confirm against the SDK default.
// NOTE(review): @MethodSource string literals in this region appear truncated.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(30, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// NFC-composed diacritics: expected offset 14.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(14, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// NFD-decomposed diacritics: one extra combining unit, expected offset 15.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Korean NFC form: expected offset 13.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Korean NFD form: same expected offset (13) as the NFC case here.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// Zalgo text (heavy combining marks): expected offset 126.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizeEntities(document))
.assertNext(result -> result.forEach(categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(126, categorizedEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
// PII happy path over a single document, validated against the expected PII entity list.
// NOTE(review): @MethodSource string literals in this region appear truncated by
// extraction (unterminated literals) — TODO confirm against the original sources.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiSingleDocumentRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1(), response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
// An empty document must error with InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(document -> StepVerifier.create(client.recognizePiiEntities(document))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
// Duplicate document IDs must be rejected with an HttpResponseException.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
// An empty document ID must fail with HTTP 400 / InvalidDocument.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
// Batch containing one bad document: accessing that document's entities must throw.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitySingleErrorRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.assertNext(resultCollection -> resultCollection.getValue().forEach(recognizePiiEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language, null))
.assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, options))
.assertNext(response -> validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(17, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(25, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(9, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizePiiEntities(document))
.assertNext(result -> result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(121, piiEntity.getOffset());
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), PII_ENTITY_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiDomainFilterRunner((document, options) ->
StepVerifier.create(client.recognizePiiEntities(document, "en", options))
.assertNext(response -> validatePiiEntities(getPiiEntitiesList1ForDomainFilter(),
response.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, language,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
.assertNext(response -> validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) ->
StepVerifier.create(client.recognizePiiEntitiesBatchWithResponse(inputs,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION)))
.assertNext(response -> validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) ->
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
.assertNext(
resultCollection -> validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
List<PiiEntityCategory> categories = new ArrayList<>();
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
.assertNext(
resultCollection -> {
resultCollection.forEach(result -> result.getEntities().forEach(piiEntity -> {
final PiiEntityCategory category = piiEntity.getCategory();
if (PiiEntityCategory.ABA_ROUTING_NUMBER == category
|| PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == category) {
categories.add(category);
}
}));
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
})
.expectComplete()
.verify(DEFAULT_TIMEOUT);
final PiiEntityCategory[] piiEntityCategories = categories.toArray(new PiiEntityCategory[categories.size()]);
options.setCategoriesFilter(piiEntityCategories);
StepVerifier.create(client.recognizePiiEntitiesBatch(inputs, "en", options))
.assertNext(
resultCollection -> validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedEntitiesForSingleTextInputRunner(input ->
StepVerifier.create(client.recognizeLinkedEntities(input))
.assertNext(response -> validateLinkedEntity(getLinkedEntitiesList1().get(0), response.iterator().next()))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.recognizeLinkedEntities(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntityRunner((inputs) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, null))
.assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(false,
getExpectedBatchLinkedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatchWithResponse(inputs, options))
.assertNext(response -> validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedStringInputRunner((inputs) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
.assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeLinkedLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, language, null))
.assertNext(response -> validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, options))
.assertNext(response -> validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.recognizeLinkedEntitiesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(22, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(30, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(14, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
StepVerifier.create(client.recognizeLinkedEntities(document))
.assertNext(result -> result.forEach(linkedEntity -> {
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(126, linkedEntityMatch.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT), LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesForSingleTextInputRunner(input ->
StepVerifier.create(client.extractKeyPhrases(input))
.assertNext(keyPhrasesCollection -> validateKeyPhrases(asList("monde"),
keyPhrasesCollection.stream().collect(Collectors.toList())))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(input ->
StepVerifier.create(client.extractKeyPhrases(input))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesRunner((inputs) ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.extractKeyPhrasesBatchWithResponse(inputs, options))
.assertNext(response -> validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesStringInputRunner((inputs) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractKeyPhrasesLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, language, null))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, options))
.assertNext(response -> validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.extractKeyPhrasesBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
* Test analyzing sentiment for a string input.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
* Test analyzing sentiment for a string input with default language hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input ->
StepVerifier.create(client.analyzeSentiment(input, null))
.assertNext(response -> validateDocumentSentiment(false, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT)
);
}
/**
* Test analyzing sentiment for a string input and verifying the result of opinion mining.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentForTextInputWithOpinionMiningRunner((input, options) ->
StepVerifier.create(client.analyzeSentiment(input, "en", options))
.assertNext(response -> validateDocumentSentiment(true, getExpectedDocumentSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyTextRunner(document ->
StepVerifier.create(client.analyzeSentiment(document))
.expectErrorMatches(throwable -> throwable instanceof TextAnalyticsException
&& INVALID_DOCUMENT.equals(((TextAnalyticsException) throwable).getErrorCode()))
.verify(DEFAULT_TIMEOUT)
);
}
/**
* Test analyzing sentiment for a duplicate ID list.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, new TextAnalyticsRequestOptions()))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* String documents with null TextAnalyticsRequestOptions and null language code which will use the default language
* code, 'en'.
*
* {@link TextAnalyticsAsyncClient
* which TextAnalyticsRequestOptions is null and null language code which will use the default language code, 'en'.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentStringInputRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false,
getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* String documents with null TextAnalyticsRequestOptions and given a language code.
*
* {@link TextAnalyticsAsyncClient
* which TextAnalyticsRequestOptions is null and given a language code.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeSentimentLanguageHintRunner((inputs, language) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, language, new TextAnalyticsRequestOptions()))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes request statistics but not sentence options when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which to show the request statistics only and verify the analyzed sentiment result.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes sentence options but not request statistics when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) -> {
options.setIncludeStatistics(false);
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
.assertNext(response -> validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
/**
* Verify that the collection result includes sentence options and request statistics when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, options))
.assertNext(response -> validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(), response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* TextDocumentInput documents with null TextAnalyticsRequestOptions.
*
* {@link TextAnalyticsAsyncClient
* which TextAnalyticsRequestOptions is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (TextAnalyticsRequestOptions) null))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that we can get statistics on the collection result when given a batch of
* TextDocumentInput documents with TextAnalyticsRequestOptions.
*
* {@link TextAnalyticsAsyncClient
* which TextAnalyticsRequestOptions includes request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentShowStatsRunner((inputs, requestOptions) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, requestOptions))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* TextDocumentInput documents with null AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, (AnalyzeSentimentOptions) null))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes request statistics but not sentence options when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions includes request statistics but not opinion mining.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options.setIncludeOpinionMining(false)))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verify that the collection result includes sentence options but not request statistics when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions includes opinion mining but not request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) -> {
options.setIncludeStatistics(false);
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response ->
validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT);
});
}
/**
* Verify that the collection result includes sentence options and request statistics when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsAsyncClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
StepVerifier.create(client.analyzeSentimentBatchWithResponse(inputs, options))
.assertNext(response -> validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200, response))
.expectComplete()
.verify(DEFAULT_TIMEOUT));
}
/**
* Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs ->
StepVerifier.create(client.analyzeSentimentBatch(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(opinionSentiment -> {
assertEquals(7, opinionSentiment.getLength());
assertEquals(17, opinionSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(34, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(26, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(16, targetSentiment.getOffset());
});
})
)
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(
result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(42, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(34, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(24, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(26, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(18, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(8, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(
sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document ->
StepVerifier.create(client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true)))
.assertNext(result -> result.getSentences().forEach(sentenceSentiment -> {
assertEquals(138, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(130, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(120, targetSentiment.getOffset());
});
}))
.expectComplete()
.verify(DEFAULT_TIMEOUT),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, dummyOptions) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
false,
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
healthcareLroPaginationRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedFlux analyzeHealthcareEntitiesPagedFlux = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
analyzeHealthcareEntitiesPagedFlux.toStream().collect(Collectors.toList()));
}, 10);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyListRunner((documents, errorMessage) -> {
StepVerifier.create(client.beginAnalyzeHealthcareEntities(documents, null))
.expectErrorMatches(throwable -> throwable instanceof IllegalArgumentException
&& errorMessage.equals(throwable.getMessage()))
.verify(DEFAULT_TIMEOUT);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiRunner(
        document -> {
            // Run the healthcare LRO on a single emoji-prefixed document and block until it finishes.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            // Each recognized entity must carry the expected length/offset for this input.
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(20, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(
        document -> {
            // Healthcare LRO on one document containing an emoji plus a skin-tone modifier.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            // The modifier shifts the expected offset relative to the plain-emoji case.
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(22, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(
        document -> {
            // Healthcare LRO on one document containing a multi-codepoint family emoji.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            // ZWJ-joined family emoji pushes the entity offset further out.
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(29, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(
        document -> {
            // Healthcare LRO on one document with a family emoji carrying skin-tone modifiers.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            // Largest surrogate footprint of the emoji variants → largest expected offset.
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(37, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(
        document -> {
            // Healthcare LRO on one document with NFC-composed diacritics.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(21, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(
        document -> {
            // Healthcare LRO on one document with NFD-decomposed diacritics (one extra code unit vs NFC).
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(22, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    koreanNfcRunner(
        document -> {
            // Healthcare LRO on one document with NFC-composed Korean text.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(20, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    koreanNfdRunner(
        document -> {
            // Healthcare LRO on one document with NFD-decomposed Korean text.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(20, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    zalgoTextRunner(
        document -> {
            // Healthcare LRO on one document with heavy combining-mark ("zalgo") text.
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
                setPollInterval(client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null).getSyncPoller());
            poller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
            // Combining marks inflate the offset dramatically while the entity length stays fixed.
            resultFlux.toStream().forEach(page -> page.forEach(documentResult ->
                documentResult.getEntities().forEach(entity -> {
                    assertEquals(11, entity.getLength());
                    assertEquals(133, entity.getOffset());
                })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux> poller =
            setPollInterval(client.beginAnalyzeHealthcareEntities(documents, "en", options).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeHealthcareEntitiesPagedFlux resultFlux = poller.getFinalResult();
        // Drill down: first page -> first document result -> second entity -> its assertion.
        AnalyzeHealthcareEntitiesResultCollection firstPage =
            resultFlux.toStream().collect(Collectors.toList()).get(0);
        AnalyzeHealthcareEntitiesResult firstDocument =
            firstPage.stream().collect(Collectors.toList()).get(0);
        final HealthcareEntityAssertion assertion =
            firstDocument.getEntities().stream().collect(Collectors.toList()).get(1).getAssertion();
        // Only conditionality is expected to be populated for this fixture.
        assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
        assertNull(assertion.getAssociation());
        assertNull(assertion.getCertainty());
    });
}
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Verifies that cancelling a healthcare LRO eventually surfaces USER_CANCELLED.
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    cancelHealthcareLroRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedFlux>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, options).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        // Request cancellation immediately after starting the operation.
        syncPoller.cancelOperation();
        LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
        // NOTE(review): tight poll loop with no upper bound — if the service never
        // reports USER_CANCELLED this spins forever; presumably acceptable under the
        // test framework's global timeout, but worth confirming before re-enabling.
        while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
            operationStatus = syncPoller.poll().getStatus();
        }
        syncPoller.waitForCompletion();
        Assertions.assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// NOTE(review): the annotation pair above duplicates the pair below — two
// @ParameterizedTest annotations on a single method do not compile, so a method
// body appears to have been lost here. TODO confirm against repository history.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs beginAnalyzeActions with statistics disabled and validates the combined
// action results (entities, PII, key phrases, sentiment) against fixtures.
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeBatchActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks,
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // The IterableStream arguments are positional, one slot per action kind;
        // IterableStream.of(null) marks an action kind that was not requested.
        validateAnalyzeBatchActionsResultList(false, false,
            Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                    TIME_NOW,
                    getRecognizePiiEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, null).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux resultFlux = poller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = resultFlux.toStream().collect(Collectors.toList());
        // Two actions of each kind were submitted, so two results of each kind come back.
        for (AnalyzeActionsResult actionsResult : actionsResults) {
            assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
            assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
        }
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeActionsWithActionNamesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, null).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux resultFlux = poller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = resultFlux.toStream().collect(Collectors.toList());
        // The custom display name set on each submitted action must round-trip.
        for (AnalyzeActionsResult actionsResult : actionsResults) {
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
        }
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // 22 documents force the action results onto multiple pages (page size 20).
    analyzeBatchActionsPaginationRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(
                documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux resultFlux = poller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
            resultFlux.toStream().collect(Collectors.toList()));
    }, 22);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    // An empty document list must fail fast with IllegalArgumentException.
    emptyListRunner((documents, errorMessage) -> {
        TextAnalyticsActions actions = new TextAnalyticsActions()
            .setRecognizeEntitiesActions(new RecognizeEntitiesAction());
        StepVerifier.create(client.beginAnalyzeActions(documents, actions, null))
            .expectErrorMatches(error ->
                error instanceof IllegalArgumentException && errorMessage.equals(error.getMessage()))
            .verify(DEFAULT_TIMEOUT);
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits only an entities-recognition action and validates its result; all other
// action-kind slots in the expected result are empty.
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeEntitiesRecognitionRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // Positional slots: only the first (recognize-entities) is populated.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                        TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits a PII-recognition action restricted by category filters and validates
// that only the filtered PII categories come back.
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // Positional slots: only the third (recognize-PII) is populated.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits a PII-recognition action restricted by a domain filter (e.g. PHI) and
// validates the filtered result collection.
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithDomainFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
            // Positional slots: only the third (recognize-PII) is populated.
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.toStream().collect(Collectors.toList()));
        }
    );
}
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits only a linked-entity recognition action ("en" hint) and validates its result.
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Positional slots: only the second (recognize-linked-entities) is populated.
        validateAnalyzeBatchActionsResultList(
            false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits only a key-phrase extraction action ("en" hint) and validates its result.
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    extractKeyPhrasesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Positional slots: only the fifth (extract-key-phrases) is populated.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits only a sentiment-analysis action ("en" hint) and validates its result.
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeSentimentRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false)).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Positional slots: only the sixth (analyze-sentiment) is populated.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getExpectedBatchTextSentiment(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Submits only a healthcare-entities action ("en" hint) and validates its result
// collection built from two recognized documents.
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller();
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
        // Positional slots: only the fourth (analyze-healthcare-entities) is populated.
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
                    getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
                        asList(
                            getRecognizeHealthcareEntitiesResult1("0"),
                            getRecognizeHealthcareEntitiesResult2())),
                    null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.toStream().collect(Collectors.toList()));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux resultFlux = poller.getFinalResult();
        // Validate the categorized entities of every document in every custom-entities action result.
        for (AnalyzeActionsResult actionsResult : resultFlux.toStream().collect(Collectors.toList())) {
            actionsResult.getRecognizeCustomEntitiesResults().forEach(customEntitiesActionResult ->
                customEntitiesActionResult.getDocumentsResults().forEach(documentResult ->
                    validateCategorizedEntities(
                        documentResult.getEntities().stream().collect(Collectors.toList()))));
        }
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux resultFlux = poller.getFinalResult();
        // Validate classification output of every document across all single-label results.
        for (AnalyzeActionsResult actionsResult : resultFlux.toStream().collect(Collectors.toList())) {
            actionsResult.getSingleLabelClassifyResults().forEach(singleLabelActionResult ->
                singleLabelActionResult.getDocumentsResults().forEach(
                    this::validateLabelClassificationResult));
        }
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiCategoryClassifyAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> poller =
            setPollInterval(client.beginAnalyzeActions(documents, tasks, "en", null).getSyncPoller());
        poller.waitForCompletion();
        AnalyzeActionsResultPagedFlux resultFlux = poller.getFinalResult();
        // Validate classification output of every document across all multi-label results.
        for (AnalyzeActionsResult actionsResult : resultFlux.toStream().collect(Collectors.toList())) {
            actionsResult.getMultiLabelClassifyResults().forEach(multiLabelActionResult ->
                multiLabelActionResult.getDocumentsResults().forEach(
                    this::validateLabelClassificationResult));
        }
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    // parameters.get(0)/get(1) carry the project name and deployment name for the custom model.
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> poller =
            setPollInterval(client.beginRecognizeCustomEntities(
                documents, parameters.get(0), parameters.get(1)).getSyncPoller());
        poller.waitForCompletion();
        RecognizeCustomEntitiesPagedFlux pagedFlux = poller.getFinalResult();
        for (RecognizeCustomEntitiesResultCollection resultCollection
                : pagedFlux.toStream().collect(Collectors.toList())) {
            resultCollection.forEach(documentResult -> validateCategorizedEntities(
                documentResult.getEntities().stream().collect(Collectors.toList())));
        }
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    recognizeCustomEntitiesRunner((documents, parameters) -> {
        // Exercise the options overload; the display name must round-trip through the poll response.
        RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
            .setDisplayName("operationName");
        SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedFlux> poller =
            setPollInterval(client.beginRecognizeCustomEntities(
                documents, parameters.get(0), parameters.get(1), "en", options).getSyncPoller());
        PollResponse<RecognizeCustomEntitiesOperationDetail> pollResponse = poller.waitForCompletion();
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        RecognizeCustomEntitiesPagedFlux pagedFlux = poller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(documentResult -> validateCategorizedEntities(
                documentResult.getEntities().stream().collect(Collectors.toList()))));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationStringInput(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    // parameters.get(0)/get(1) carry the project name and deployment name for the classifier.
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> poller =
            setPollInterval(client.beginSingleLabelClassify(
                documents, parameters.get(0), parameters.get(1)).getSyncPoller());
        poller.waitForCompletion();
        ClassifyDocumentPagedFlux pagedFlux = poller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(this::validateLabelClassificationResult));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
    classifyCustomSingleLabelRunner((documents, parameters) -> {
        // Exercise the options overload; the display name must round-trip through the poll response.
        SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
        SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> poller =
            setPollInterval(client.beginSingleLabelClassify(
                documents, parameters.get(0), parameters.get(1), "en", options).getSyncPoller());
        PollResponse<ClassifyDocumentOperationDetail> pollResponse = poller.waitForCompletion();
        assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        ClassifyDocumentPagedFlux pagedFlux = poller.getFinalResult();
        pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
            resultCollection.forEach(this::validateLabelClassificationResult));
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1))
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedFlux> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedFlux pagedFlux = syncPoller.getFinalResult();
pagedFlux.toStream().collect(Collectors.toList()).forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
TIME_NOW,
getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
null))),
IterableStream.of(null)
)),
result.toStream().collect(Collectors.toList()));
}, null, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isAscendingOrderByOffSet(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.toStream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5))));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryActionRunner(
(documents, tasks) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions())
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
});
assertEquals(
TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedFlux> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions()).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedFlux result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
TIME_NOW,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
null
)))
)),
result.toStream().collect(Collectors.toList()));
}, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> assertEquals(HttpResponseException.class, ex.getClass()))
.verify(DEFAULT_TIMEOUT);
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
StepVerifier.create(client.beginAbstractSummary(inputs, null, null))
.expectErrorSatisfies(ex -> {
final HttpResponseException httpResponseException = (HttpResponseException) ex;
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
})
.verify(DEFAULT_TIMEOUT);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
abstractiveSummaryMaxOverloadRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedFlux> syncPoller =
client.beginAbstractSummary(documents, options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isDescendingOrderByRankScore(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options).getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
result.toStream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5)));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsAsyncClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryRunner(
(documents, options) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedFlux> syncPoller =
client.beginExtractSummary(documents, "en", options)
.getSyncPoller();
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedFlux result = syncPoller.getFinalResult();
});
assertEquals(
TextAnalyticsErrorCode.INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
} |
There is a test regression that the Linked Entities Task doesn't work. This test tests if multiple actions work. So removing one from five tasks doesn't break the testing purpose. | public void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsStringInputRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
} | IterableStream.of(null), | public void analyzeActionsStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsStringInputRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
} | class TextAnalyticsClientTest extends TextAnalyticsClientTestBase {
private TextAnalyticsClient client;
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
private TextAnalyticsClient getTextAnalyticsClient(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
return getTextAnalyticsClientBuilder(
buildSyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient),
serviceVersion,
isStaticResource)
.buildClient();
}
/**
* Verify that we can get statistics on the collection result when given a batch of documents with options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageShowStatisticsRunner((inputs, options) -> validateDetectLanguageResultCollectionWithResponse(true,
getExpectedBatchDetectedLanguages(), 200,
client.detectLanguageBatchWithResponse(inputs, options, Context.NONE)));
}
/**
* Test Detect batch of documents languages.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageRunner((inputs) -> validateDetectLanguageResultCollectionWithResponse(false,
getExpectedBatchDetectedLanguages(), 200,
client.detectLanguageBatchWithResponse(inputs, null, Context.NONE)));
}
/**
* Test detect batch languages for a list of string input with country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguagesCountryHintRunner((inputs, countryHint) -> validateDetectLanguageResultCollection(
false, getExpectedBatchDetectedLanguages(),
client.detectLanguageBatch(inputs, countryHint, null)));
}
/**
* Test detect batch languages for a list of string input with request options
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> validateDetectLanguageResultCollection(true,
getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, options)));
}
/**
* Test detect batch languages for a list of string input.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageStringInputRunner((inputs) -> validateDetectLanguageResultCollection(
false, getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, null)));
}
/**
* Verifies that a single DetectLanguageResult is returned for a document to detect language.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectSingleTextLanguageRunner(input ->
validatePrimaryLanguage(getDetectedLanguageEnglish(), client.detectLanguage(input)));
}
/**
* Verifies that a TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.detectLanguage(input));
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
/**
* Verifies that a bad request exception is returned for input documents with same IDs.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageDuplicateIdRunner((inputs, options) -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.detectLanguageBatchWithResponse(inputs, options, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageInputEmptyIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.detectLanguageBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
/**
* Verifies that a TextAnalyticsException is thrown for a document with invalid country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageInvalidCountryHintRunner((input, countryHint) -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.detectLanguage(input, countryHint));
assertEquals(INVALID_COUNTRY_HINT, exception.getErrorCode());
});
}
/**
* Verify that with countryHint with empty string will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageEmptyCountryHintRunner((input, countryHint) ->
validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
/**
* Verify that with countryHint with "none" will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageNoneCountryHintRunner((input, countryHint) ->
validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input -> {
final List<CategorizedEntity> entities = client.recognizeEntities(input).stream().collect(Collectors.toList());
validateCategorizedEntities(entities);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.recognizeEntities(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> {
Response<RecognizeEntitiesResultCollection> response = client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE);
response.getValue().forEach(recognizeEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntityRunner((inputs) ->
validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200,
client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200,
client.recognizeEntitiesBatchWithResponse(inputs, options, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntityStringInputRunner((inputs) ->
validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, language, null))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, null, options))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(22, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(30, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(14, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(126, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiSingleDocumentRunner(document -> {
final PiiEntityCollection entities = client.recognizePiiEntities(document);
validatePiiEntities(getPiiEntitiesList1(), entities.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(document -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class, () ->
client.recognizePiiEntities(document).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitySingleErrorRunner((inputs) -> {
Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE);
response.getValue().forEach(recognizePiiEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
});
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner(inputs ->
validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200,
client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200,
client.recognizePiiEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiEntitiesLanguageHintRunner((inputs, language) ->
validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(),
client.recognizePiiEntitiesBatch(inputs, language, null))
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(),
client.recognizePiiEntitiesBatch(inputs, null, options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(17, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(25, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(9, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(121, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiDomainFilterRunner((document, options) -> {
final PiiEntityCollection entities = client.recognizePiiEntities(document, "en", options);
validatePiiEntities(getPiiEntitiesList1ForDomainFilter(), entities.stream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) -> {
final RecognizePiiEntitiesResultCollection response = client.recognizePiiEntitiesBatch(inputs, language,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response);
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) -> {
final Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION), Context.NONE);
validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
final RecognizePiiEntitiesResultCollection resultCollection =
client.recognizePiiEntitiesBatch(inputs, "en", options);
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
}
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
List<PiiEntityCategory> categories = new ArrayList<>();
final RecognizePiiEntitiesResultCollection resultCollection = client.recognizePiiEntitiesBatch(inputs, "en", options);
resultCollection.forEach(
result -> result.getEntities().forEach(
piiEntity -> {
final PiiEntityCategory category = piiEntity.getCategory();
if (PiiEntityCategory.ABA_ROUTING_NUMBER == category
|| PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == category) {
categories.add(category);
}
}));
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
final PiiEntityCategory[] piiEntityCategories = categories.toArray(
new PiiEntityCategory[categories.size()]);
options.setCategoriesFilter(piiEntityCategories);
final RecognizePiiEntitiesResultCollection resultCollection2 = client.recognizePiiEntitiesBatch(
inputs, "en", options);
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection2);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedEntitiesForSingleTextInputRunner(input -> {
final List<LinkedEntity> linkedEntities = client.recognizeLinkedEntities(input)
.stream().collect(Collectors.toList());
validateLinkedEntity(getLinkedEntitiesList1().get(0), linkedEntities.get(0));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.recognizeLinkedEntities(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntityRunner((inputs) ->
validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200,
client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200,
client.recognizeLinkedEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedStringInputRunner((inputs) ->
validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedLanguageHintRunner((inputs, language) ->
validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, language, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchStringLinkedEntitiesShowStatsRunner((inputs, options) ->
validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeLinkedEntitiesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity -> linkedEntity.getMatches().forEach(
linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity -> linkedEntity.getMatches().forEach(
linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity -> linkedEntity.getMatches().forEach(
linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(22, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
client.recognizeLinkedEntities(document).forEach(linkedEntity ->
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(30, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity -> linkedEntity.getMatches().forEach(
linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(14, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity -> linkedEntity.getMatches().forEach(
linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(15, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity -> linkedEntity.getMatches().forEach(
linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity -> linkedEntity.getMatches().forEach(
linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(13, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
client.recognizeLinkedEntities(document).forEach(
linkedEntity ->
linkedEntity.getMatches().forEach(linkedEntityMatch -> {
assertEquals(9, linkedEntityMatch.getLength());
assertEquals(126, linkedEntityMatch.getOffset());
})),
LINKED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractKeyPhrasesForSingleTextInputRunner(input -> {
final KeyPhrasesCollection keyPhrasesCollection = client.extractKeyPhrases(input);
validateKeyPhrases(asList("monde"), keyPhrasesCollection.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.extractKeyPhrases(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.extractKeyPhrasesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.extractKeyPhrasesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesRunner((inputs) ->
validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200,
client.extractKeyPhrasesBatchWithResponse(inputs, null, Context.NONE)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractBatchKeyPhrasesShowStatsRunner((inputs, options) ->
validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200,
client.extractKeyPhrasesBatchWithResponse(inputs, options, Context.NONE)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractKeyPhrasesStringInputRunner((inputs) ->
validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), client.extractKeyPhrasesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractKeyPhrasesLanguageHintRunner((inputs, language) ->
validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(), client.extractKeyPhrasesBatch(inputs, language, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractBatchStringKeyPhrasesShowStatsRunner((inputs, options) ->
validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(), client.extractKeyPhrasesBatch(inputs, null, options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.extractKeyPhrasesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
/**
* Test analyzing sentiment for a string input.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input -> {
validateDocumentSentiment(false, getExpectedDocumentSentiment(), client.analyzeSentiment(input));
});
}
/**
* Test analyzing sentiment for a string input with default language hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeSentimentForSingleTextInputRunner(input -> {
final DocumentSentiment analyzeSentimentResult = client.analyzeSentiment(input, null);
validateDocumentSentiment(false, getExpectedDocumentSentiment(), analyzeSentimentResult);
});
}
/**
* Test analyzing sentiment for a string input and verifying the result of opinion mining.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeSentimentForTextInputWithOpinionMiningRunner((input, options) -> {
final DocumentSentiment analyzeSentimentResult =
client.analyzeSentiment(input, "en", options);
validateDocumentSentiment(true, getExpectedDocumentSentiment(), analyzeSentimentResult);
});
}
/**
* Verifies that a TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(document -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.analyzeSentiment(document));
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
/**
* Test analyzing sentiment for a duplicate ID list.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.analyzeSentimentBatchWithResponse(inputs, new TextAnalyticsRequestOptions(), Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.analyzeSentimentBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* String documents with null TextAnalyticsRequestOptions and null language code which will use the default language
* code, 'en'.
*
* {@link TextAnalyticsClient
* which TextAnalyticsRequestOptions is null and null language code which will use the default language code, 'en'.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeSentimentStringInputRunner(inputs ->
validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
client.analyzeSentimentBatch(inputs, null, new TextAnalyticsRequestOptions())));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* String documents with null TextAnalyticsRequestOptions and given a language code.
*
* {@link TextAnalyticsClient
* which TextAnalyticsRequestOptions is null and given a language code.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeSentimentLanguageHintRunner((inputs, language) ->
validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
client.analyzeSentimentBatch(inputs, language, new TextAnalyticsRequestOptions())));
}
/**
* Verify that the collection result includes request statistics but not sentence options when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsClient
* which to show the request statistics only and verify the analyzed sentiment result.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(),
client.analyzeSentimentBatch(inputs, null, options.setIncludeOpinionMining(false))));
}
/**
* Verify that the collection result includes sentence options but not request statistics when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) -> {
options.setIncludeStatistics(false);
validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(),
client.analyzeSentimentBatch(inputs, null, options));
});
}
/**
* Verify that the collection result includes sentence options and request statistics when given a batch of
* String documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((inputs, options) ->
validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(),
client.analyzeSentimentBatch(inputs, null, options)));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* TextDocumentInput documents with null TextAnalyticsRequestOptions.
*
* {@link TextAnalyticsClient
* which TextAnalyticsRequestOptions is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchSentimentRunner(inputs ->
validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
client.analyzeSentimentBatchWithResponse(inputs, (TextAnalyticsRequestOptions) null, Context.NONE)));
}
/**
* Verify that we can get statistics on the collection result when given a batch of
* TextDocumentInput documents with TextAnalyticsRequestOptions.
*
* {@link TextAnalyticsClient
* which TextAnalyticsRequestOptions includes request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchSentimentShowStatsRunner((inputs, requestOptions) ->
validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
client.analyzeSentimentBatchWithResponse(inputs, requestOptions, Context.NONE)));
}
/**
* Verify that the collection result excludes request statistics and sentence options when given a batch of
* TextDocumentInput documents with null AnalyzeSentimentOptions.
*
* {@link TextAnalyticsClient
* which AnalyzeSentimentOptions is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
client.analyzeSentimentBatchWithResponse(inputs, (AnalyzeSentimentOptions) null, Context.NONE)));
}
/**
* Verify that the collection result includes request statistics but not sentence options when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsClient
* which AnalyzeSentimentOptions includes request statistics but not opinion mining.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
client.analyzeSentimentBatchWithResponse(inputs, options.setIncludeOpinionMining(false), Context.NONE)));
}
/**
* Verify that the collection result includes sentence options but not request statistics when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsClient
* which AnalyzeSentimentOptions includes opinion mining but not request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) -> {
options.setIncludeStatistics(false);
validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200,
client.analyzeSentimentBatchWithResponse(inputs, options, Context.NONE));
});
}
/**
* Verify that the collection result includes sentence options and request statistics when given a batch of
* TextDocumentInput documents with AnalyzeSentimentOptions.
*
* {@link TextAnalyticsClient
* which AnalyzeSentimentOptions includes opinion mining and request statistics.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchSentimentOpinionMining((inputs, options) ->
validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200,
client.analyzeSentimentBatchWithResponse(inputs, options, Context.NONE)));
}
/**
* Verifies that an InvalidDocumentBatch exception is returned for input documents with too many documents.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.analyzeSentimentBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(
sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(34, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(26, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(16, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(42, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(34, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(24, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(26, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(18, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(8, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(27, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(19, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(9, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document ->
client.analyzeSentiment(document, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(25, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(17, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(7, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document ->
client.analyzeSentiment(document, null,
new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
.getSentences()
.forEach(sentenceSentiment -> {
assertEquals(138, sentenceSentiment.getLength());
assertEquals(0, sentenceSentiment.getOffset());
sentenceSentiment.getOpinions().forEach(opinion -> {
opinion.getAssessments().forEach(assessmentSentiment -> {
assertEquals(7, assessmentSentiment.getLength());
assertEquals(130, assessmentSentiment.getOffset());
});
final TargetSentiment targetSentiment = opinion.getTarget();
assertEquals(5, targetSentiment.getLength());
assertEquals(120, targetSentiment.getOffset());
});
}),
SENTIMENT_OFFSET_INPUT
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, dummyOptions) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
false,
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareStringInputRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareLroRunner((documents, options) -> {
boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
&& serviceVersion != TextAnalyticsServiceVersion.V3_1;
if (isValidApiVersionForDisplayName) {
options.setDisplayName("operationName");
}
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
if (isValidApiVersionForDisplayName) {
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
}
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
healthcareLroPaginationRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
validateAnalyzeHealthcareEntitiesResultCollectionList(
options.isIncludeStatistics(),
getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
}, 10);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyListRunner((documents, errorMessage) -> {
final IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
() -> client.beginAnalyzeHealthcareEntities(documents, null, Context.NONE).getFinalResult());
assertEquals(errorMessage, exception.getMessage());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(29, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(37, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(21, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(22, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(20, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(
document -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(
Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
analyzeHealthcareEntitiesPagedIterable.forEach(
result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
entity -> {
assertEquals(11, entity.getLength());
assertEquals(133, entity.getOffset());
})));
},
HEALTHCARE_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
final HealthcareEntityAssertion assertion =
analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList())
.get(0).stream().collect(Collectors.toList())
.get(0).getEntities().stream().collect(Collectors.toList())
.get(1)
.getAssertion();
assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
assertNull(assertion.getAssociation());
assertNull(assertion.getCertainty());
});
}
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
cancelHealthcareLroRunner((documents, options) -> {
SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.cancelOperation();
LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
operationStatus = syncPoller.poll().getStatus();
}
syncPoller.waitForCompletion();
Assertions.assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchActionsRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, null, null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(actionsResult -> {
assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeActionsWithActionNamesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, null, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(actionsResult -> {
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
.collect(Collectors.toList()).get(0).getActionName());
});
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeBatchActionsPaginationRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable>
syncPoller = client.beginAnalyzeActions(
documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
result.stream().collect(Collectors.toList()));
}, 22);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyListRunner((documents, errorMessage) -> {
final IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
() -> client.beginAnalyzeActions(documents,
new TextAnalyticsActions().setRecognizeEntitiesActions(new RecognizeEntitiesAction()),
null, Context.NONE)
.getFinalResult());
assertEquals(errorMessage, exception.getMessage());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeEntitiesRecognitionRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzePiiEntityRecognitionWithDomainFiltersRunner(
(documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks,
new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
Arrays.asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}
);
}
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractKeyPhrasesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeSentimentRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en",
new AnalyzeActionsOptions().setIncludeStatistics(false));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
TIME_NOW, getExpectedBatchTextSentiment(), null))),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeHealthcareEntitiesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
asList(
getRecognizeHealthcareEntitiesResult1("0"),
getRecognizeHealthcareEntitiesResult2())),
null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getRecognizeCustomEntitiesResults().forEach(
customEntitiesActionResult -> customEntitiesActionResult.getDocumentsResults().forEach(
documentResult -> validateCategorizedEntities(
documentResult.getEntities().stream().collect(Collectors.toList())))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getSingleLabelClassifyResults().forEach(
customSingleCategoryActionResult -> customSingleCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiCategoryClassifyAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getMultiLabelClassifyResults().forEach(
customMultiCategoryActionResult -> customMultiCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesRunner((documents, parameters) -> {
SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult ->
validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesRunner((documents, parameters) -> {
RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
.setDisplayName("operationName");
SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<RecognizeCustomEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult ->
validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
TIME_NOW,
getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
null))),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}, null, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isAscendingOrderByOffSet(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5))));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryActionRunner(
(documents, tasks) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
});
assertEquals(
INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
TIME_NOW,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
null
)))
)),
result.stream().collect(Collectors.toList()));
}, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, null).getFinalResult());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_PARAMETER_VALUE, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
client.beginAbstractSummary(documents);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryMaxOverloadRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
client.beginAbstractSummary(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5)));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryRunner(
(documents, options) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
});
assertEquals(
INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
} | class TextAnalyticsClientTest extends TextAnalyticsClientTestBase {
private TextAnalyticsClient client;
private HttpClient buildSyncAssertingClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.build();
}
private TextAnalyticsClient getTextAnalyticsClient(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion, boolean isStaticResource) {
return getTextAnalyticsClientBuilder(
buildSyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient),
serviceVersion,
isStaticResource)
.buildClient();
}
/**
* Verify that we can get statistics on the collection result when given a batch of documents with options.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageShowStatisticsRunner((inputs, options) -> validateDetectLanguageResultCollectionWithResponse(true,
getExpectedBatchDetectedLanguages(), 200,
client.detectLanguageBatchWithResponse(inputs, options, Context.NONE)));
}
/**
* Test Detect batch of documents languages.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageRunner((inputs) -> validateDetectLanguageResultCollectionWithResponse(false,
getExpectedBatchDetectedLanguages(), 200,
client.detectLanguageBatchWithResponse(inputs, null, Context.NONE)));
}
/**
* Test detect batch languages for a list of string input with country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguagesCountryHintRunner((inputs, countryHint) -> validateDetectLanguageResultCollection(
false, getExpectedBatchDetectedLanguages(),
client.detectLanguageBatch(inputs, countryHint, null)));
}
/**
* Test detect batch languages for a list of string input with request options
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchListCountryHintWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguagesBatchListCountryHintWithOptionsRunner((inputs, options) -> validateDetectLanguageResultCollection(true,
getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, options)));
}
/**
* Test detect batch languages for a list of string input.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguagesBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageStringInputRunner((inputs) -> validateDetectLanguageResultCollection(
false, getExpectedBatchDetectedLanguages(), client.detectLanguageBatch(inputs, null, null)));
}
/**
* Verifies that a single DetectLanguageResult is returned for a document to detect language.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectSingleTextLanguage(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectSingleTextLanguageRunner(input ->
validatePrimaryLanguage(getDetectedLanguageEnglish(), client.detectLanguage(input)));
}
/**
* Verifies that a TextAnalyticsException is thrown for an empty document.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.detectLanguage(input));
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
/**
* Verifies that a bad request exception is returned for input documents with same IDs.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageDuplicateIdRunner((inputs, options) -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.detectLanguageBatchWithResponse(inputs, options, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
/**
* Verifies that an invalid document exception is returned for input documents with an empty ID.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageInputEmptyIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.detectLanguageBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
/**
* Verifies that a TextAnalyticsException is thrown for a document with invalid country hint.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageInvalidCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageInvalidCountryHintRunner((input, countryHint) -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.detectLanguage(input, countryHint));
assertEquals(INVALID_COUNTRY_HINT, exception.getErrorCode());
});
}
/**
* Verify that with countryHint with empty string will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageEmptyCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageEmptyCountryHintRunner((input, countryHint) ->
validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
/**
* Verify that with countryHint with "none" will not throw exception.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void detectLanguageNoneCountryHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
detectLanguageNoneCountryHintRunner((input, countryHint) ->
validatePrimaryLanguage(getDetectedLanguageSpanish(), client.detectLanguage(input, countryHint)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesForSingleTextInputRunner(input -> {
final List<CategorizedEntity> entities = client.recognizeEntities(input).stream().collect(Collectors.toList());
validateCategorizedEntities(entities);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.recognizeEntities(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitySingleErrorRunner((inputs) -> {
Response<RecognizeEntitiesResultCollection> response = client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE);
response.getValue().forEach(recognizeEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizeEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizeEntitiesResult"), exception.getMessage());
});
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntityRunner((inputs) ->
validateCategorizedEntitiesResultCollectionWithResponse(false, getExpectedBatchCategorizedEntities(), 200,
client.recognizeEntitiesBatchWithResponse(inputs, null, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
validateCategorizedEntitiesResultCollectionWithResponse(true, getExpectedBatchCategorizedEntities(), 200,
client.recognizeEntitiesBatchWithResponse(inputs, options, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntityStringInputRunner((inputs) ->
validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeCategorizedEntitiesLanguageHintRunner((inputs, language) ->
validateCategorizedEntitiesResultCollection(false, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, language, null))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesForListWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchCategorizedEntitiesShowStatsRunner((inputs, options) ->
validateCategorizedEntitiesResultCollection(true, getExpectedBatchCategorizedEntities(),
client.recognizeEntitiesBatch(inputs, null, options))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeEntitiesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesBatchWithResponseEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(22, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(30, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(14, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(15, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(13, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(document ->
client.recognizeEntities(document).forEach(
categorizedEntity -> {
assertEquals(9, categorizedEntity.getLength());
assertEquals(126, categorizedEntity.getOffset());
}),
CATEGORIZED_ENTITY_INPUTS.get(1)
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiSingleDocumentRunner(document -> {
final PiiEntityCollection entities = client.recognizePiiEntities(document);
validatePiiEntities(getPiiEntitiesList1(), entities.stream().collect(Collectors.toList()));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(document -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class, () ->
client.recognizePiiEntities(document).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchInputSingleError(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitySingleErrorRunner((inputs) -> {
Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE);
response.getValue().forEach(recognizePiiEntitiesResult -> {
Exception exception = assertThrows(TextAnalyticsException.class, recognizePiiEntitiesResult::getEntities);
assertEquals(String.format(BATCH_ERROR_EXCEPTION_MESSAGE, "RecognizePiiEntitiesResult"), exception.getMessage());
});
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner(inputs ->
validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntities(), 200,
client.recognizePiiEntitiesBatchWithResponse(inputs, null, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesShowStatsRunner((inputs, options) ->
validatePiiEntitiesResultCollectionWithResponse(true, getExpectedBatchPiiEntities(), 200,
client.recognizePiiEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiEntitiesLanguageHintRunner((inputs, language) ->
validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntities(),
client.recognizePiiEntitiesBatch(inputs, language, null))
);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesShowStatsRunner((inputs, options) ->
validatePiiEntitiesResultCollection(true, getExpectedBatchPiiEntities(),
client.recognizePiiEntitiesBatch(inputs, null, options)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizePiiEntitiesBatch(inputs, null, null).stream().findFirst().get());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiWithSkinToneModifierRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(17, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emojiFamilyWithSkinToneModifierRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(25, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfcRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(9, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
diacriticsNfdRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(10, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfcRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
koreanNfdRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(8, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
zalgoTextRunner(document -> {
final PiiEntityCollection result = client.recognizePiiEntities(document);
result.forEach(piiEntity -> {
assertEquals(11, piiEntity.getLength());
assertEquals(121, piiEntity.getOffset());
});
}, PII_ENTITY_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiDomainFilterRunner((document, options) -> {
final PiiEntityCollection entities = client.recognizePiiEntities(document, "en", options);
validatePiiEntities(getPiiEntitiesList1ForDomainFilter(), entities.stream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputStringForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizePiiLanguageHintRunner((inputs, language) -> {
final RecognizePiiEntitiesResultCollection response = client.recognizePiiEntitiesBatch(inputs, language,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION));
validatePiiEntitiesResultCollection(false, getExpectedBatchPiiEntitiesForDomainFilter(), response);
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForDomainFilter(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchPiiEntitiesRunner((inputs) -> {
final Response<RecognizePiiEntitiesResultCollection> response = client.recognizePiiEntitiesBatchWithResponse(inputs,
new RecognizePiiEntitiesOptions().setDomainFilter(PiiEntityDomain.PROTECTED_HEALTH_INFORMATION), Context.NONE);
validatePiiEntitiesResultCollectionWithResponse(false, getExpectedBatchPiiEntitiesForDomainFilter(), 200, response);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntitiesForBatchInputForCategoriesFilter(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
final RecognizePiiEntitiesResultCollection resultCollection =
client.recognizePiiEntitiesBatch(inputs, "en", options);
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
}
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizePiiEntityWithCategoriesFilterFromOtherResult(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeStringBatchPiiEntitiesForCategoriesFilterRunner(
(inputs, options) -> {
List<PiiEntityCategory> categories = new ArrayList<>();
final RecognizePiiEntitiesResultCollection resultCollection = client.recognizePiiEntitiesBatch(inputs, "en", options);
resultCollection.forEach(
result -> result.getEntities().forEach(
piiEntity -> {
final PiiEntityCategory category = piiEntity.getCategory();
if (PiiEntityCategory.ABA_ROUTING_NUMBER == category
|| PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER == category) {
categories.add(category);
}
}));
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection);
final PiiEntityCategory[] piiEntityCategories = categories.toArray(
new PiiEntityCategory[categories.size()]);
options.setCategoriesFilter(piiEntityCategories);
final RecognizePiiEntitiesResultCollection resultCollection2 = client.recognizePiiEntitiesBatch(
inputs, "en", options);
validatePiiEntitiesResultCollection(false,
getExpectedBatchPiiEntitiesForCategoriesFilter(), resultCollection2);
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedEntitiesForSingleTextInputRunner(input -> {
final List<LinkedEntity> linkedEntities = client.recognizeLinkedEntities(input)
.stream().collect(Collectors.toList());
validateLinkedEntity(getLinkedEntitiesList1().get(0), linkedEntities.get(0));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyTextRunner(input -> {
final TextAnalyticsException exception = assertThrows(TextAnalyticsException.class,
() -> client.recognizeLinkedEntities(input).iterator().hasNext());
assertEquals(INVALID_DOCUMENT, exception.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntityRunner((inputs) ->
validateLinkedEntitiesResultCollectionWithResponse(false, getExpectedBatchLinkedEntities(), 200,
client.recognizeLinkedEntitiesBatchWithResponse(inputs, null, Context.NONE))
);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeBatchLinkedEntitiesShowStatsRunner((inputs, options) ->
validateLinkedEntitiesResultCollectionWithResponse(true, getExpectedBatchLinkedEntities(), 200,
client.recognizeLinkedEntitiesBatchWithResponse(inputs, options, Context.NONE)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedStringInputRunner((inputs) ->
validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, null, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeLinkedEntitiesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
recognizeLinkedLanguageHintRunner((inputs, language) ->
validateLinkedEntitiesResultCollection(false, getExpectedBatchLinkedEntities(), client.recognizeLinkedEntitiesBatch(inputs, language, null)));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with request options that enable statistics.
public void recognizeLinkedEntitiesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    recognizeBatchStringLinkedEntitiesShowStatsRunner((documents, requestOptions) -> {
        validateLinkedEntitiesResultCollection(true, getExpectedBatchLinkedEntities(),
            client.recognizeLinkedEntitiesBatch(documents, null, requestOptions));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sending more documents than the service allows must fail with HTTP 400 and an
// InvalidDocumentBatch error code.
public void recognizeLinkedEntitiesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.recognizeLinkedEntitiesBatch(inputs, null, null).stream().findFirst().get());
        // Named constant instead of the literal 400, matching the other bad-request assertions
        // in this class (e.g. the duplicate-ID tests).
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for a document containing an emoji
// (offsets presumably reflect UTF-16 code units — see the other offset tests).
public void recognizeLinkedEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for an emoji with a skin-tone modifier.
public void recognizeLinkedEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(15, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for an emoji-family (multi-code-point) sequence.
public void recognizeLinkedEntitiesEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(22, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for an emoji family with skin-tone modifiers.
// (Method name keeps its historical "WIth" spelling; renaming would break test filters.)
public void recognizeLinkedEntitiesEmojiFamilyWIthSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(30, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for NFC-normalized diacritics.
public void recognizeLinkedEntitiesDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(14, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for NFD-normalized diacritics (one more code unit than NFC).
public void recognizeLinkedEntitiesDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(15, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for NFC-normalized Korean text.
public void recognizeLinkedEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for NFD-normalized Korean text.
public void recognizeLinkedEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(13, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Match offset/length assertions for "zalgo" text (many combining marks before the entity).
public void recognizeLinkedEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(document ->
        client.recognizeLinkedEntities(document).forEach(entity ->
            entity.getMatches().forEach(match -> {
                assertEquals(9, match.getLength());
                assertEquals(126, match.getOffset());
            })),
        LINKED_ENTITY_INPUTS.get(1));
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// A single (French) document should yield exactly the key phrase "monde".
public void extractKeyPhrasesForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesForSingleTextInputRunner(document -> {
        final KeyPhrasesCollection phrases = client.extractKeyPhrases(document);
        validateKeyPhrases(asList("monde"), phrases.stream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// An empty document is rejected with a TextAnalyticsException carrying INVALID_DOCUMENT.
public void extractKeyPhrasesForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(document -> {
        // iterator().hasNext() forces evaluation of the lazy result.
        final TextAnalyticsException thrown = assertThrows(TextAnalyticsException.class,
            () -> client.extractKeyPhrases(document).iterator().hasNext());
        assertEquals(INVALID_DOCUMENT, thrown.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Duplicate document IDs in one batch are rejected with HTTP 400.
public void extractKeyPhrasesDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(documents -> {
        final HttpResponseException thrown = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, thrown.getResponse().getStatusCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Documents with an empty ID are rejected with HTTP 400 and an INVALID_DOCUMENT error code.
public void extractKeyPhrasesEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatchWithResponse(inputs, null, Context.NONE));
        // Named constant instead of the literal 400, consistent with the duplicate-ID tests.
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of TextDocumentInput documents; with-response overload, no statistics requested.
public void extractKeyPhrasesForBatchInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesRunner(documents -> {
        validateExtractKeyPhrasesResultCollectionWithResponse(false, getExpectedBatchKeyPhrases(), 200,
            client.extractKeyPhrasesBatchWithResponse(documents, null, Context.NONE));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Same as the batch test above, but with options that request statistics.
public void extractKeyPhrasesForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchKeyPhrasesShowStatsRunner((documents, requestOptions) -> {
        validateExtractKeyPhrasesResultCollectionWithResponse(true, getExpectedBatchKeyPhrases(), 200,
            client.extractKeyPhrasesBatchWithResponse(documents, requestOptions, Context.NONE));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with no language hint and no options.
public void extractKeyPhrasesForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesStringInputRunner(documents -> {
        validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, null, null));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with an explicit language hint.
public void extractKeyPhrasesForListLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesLanguageHintRunner((documents, languageHint) -> {
        validateExtractKeyPhrasesResultCollection(false, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, languageHint, null));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Batch of String documents with request options that enable statistics.
public void extractKeyPhrasesForListStringWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractBatchStringKeyPhrasesShowStatsRunner((documents, requestOptions) -> {
        validateExtractKeyPhrasesResultCollection(true, getExpectedBatchKeyPhrases(),
            client.extractKeyPhrasesBatch(documents, null, requestOptions));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Exceeding the service's per-request document limit must fail with HTTP 400 and an
// InvalidDocumentBatch error code.
public void extractKeyPhrasesBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.extractKeyPhrasesBatch(inputs, null, null).stream().findFirst().get());
        // Named constant instead of the literal 400, consistent with the duplicate-ID tests.
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
    });
}
/**
 * Verifies sentiment analysis of a single string document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForSingleTextInputRunner(document ->
        validateDocumentSentiment(false, getExpectedDocumentSentiment(), client.analyzeSentiment(document)));
}
/**
 * Verifies sentiment analysis of a single string document when the language hint is null,
 * i.e. the service's default language handling applies.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithDefaultLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForSingleTextInputRunner(document -> {
        final DocumentSentiment result = client.analyzeSentiment(document, null);
        validateDocumentSentiment(false, getExpectedDocumentSentiment(), result);
    });
}
/**
 * Verifies sentiment analysis of a single string document with opinion mining enabled,
 * checking the mined opinions in the result.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForTextInputWithOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentForTextInputWithOpinionMiningRunner((document, options) -> {
        final DocumentSentiment result = client.analyzeSentiment(document, "en", options);
        validateDocumentSentiment(true, getExpectedDocumentSentiment(), result);
    });
}
/**
 * Verifies that a TextAnalyticsException with error code INVALID_DOCUMENT is thrown
 * for an empty document.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForEmptyText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyTextRunner(document -> {
        final TextAnalyticsException thrown = assertThrows(TextAnalyticsException.class,
            () -> client.analyzeSentiment(document));
        assertEquals(INVALID_DOCUMENT, thrown.getErrorCode());
    });
}
/**
 * Verifies that a batch containing duplicate document IDs is rejected with HTTP 400.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentDuplicateIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    duplicateIdRunner(documents -> {
        final HttpResponseException thrown = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatchWithResponse(documents, new TextAnalyticsRequestOptions(), Context.NONE));
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, thrown.getResponse().getStatusCode());
    });
}
/**
 * Verifies that documents with an empty ID are rejected with HTTP 400 and an
 * INVALID_DOCUMENT error code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyDocumentIdRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatchWithResponse(inputs, null, Context.NONE));
        // Named constant instead of the literal 400, consistent with analyzeSentimentDuplicateIdInput.
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, httpResponseException.getResponse().getStatusCode());
        // Local made final to match the sibling empty-ID tests in this class.
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
    });
}
/**
 * Verifies that statistics and sentence-level opinions are both absent from the collection
 * result when a batch of String documents is analyzed with default request options and a
 * null language code (the service default, 'en', applies).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentStringInputRunner(documents -> {
        validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, new TextAnalyticsRequestOptions()));
    });
}
/**
 * Verifies that statistics and sentence-level opinions are both absent from the collection
 * result when a batch of String documents is analyzed with default request options and an
 * explicit language code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringWithLanguageHint(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentLanguageHintRunner((documents, languageHint) -> {
        validateAnalyzeSentimentResultCollection(false, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, languageHint, new TextAnalyticsRequestOptions()));
    });
}
/**
 * Verifies that the collection result contains request statistics but no opinion-mining
 * output when opinion mining is explicitly disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        validateAnalyzeSentimentResultCollection(true, false, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options.setIncludeOpinionMining(false)));
    });
}
/**
 * Verifies that the collection result contains opinion-mining output but no request
 * statistics when statistics are explicitly disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        options.setIncludeStatistics(false);
        validateAnalyzeSentimentResultCollection(false, true, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options));
    });
}
/**
 * Verifies that the collection result contains both opinion-mining output and request
 * statistics when both are enabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForListStringShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchStringSentimentShowStatsAndIncludeOpinionMiningRunner((documents, options) -> {
        validateAnalyzeSentimentResultCollection(true, true, getExpectedBatchTextSentiment(),
            client.analyzeSentimentBatch(documents, null, options));
    });
}
/**
 * Verifies that statistics and sentence-level opinions are both absent from the
 * with-response collection result when TextAnalyticsRequestOptions is null.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullRequestOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentRunner(documents -> {
        // Cast disambiguates the overload taking TextAnalyticsRequestOptions.
        validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, (TextAnalyticsRequestOptions) null, Context.NONE));
    });
}
/**
 * Verifies that request statistics are present in the with-response collection result when
 * TextAnalyticsRequestOptions enables them.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatistics(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentShowStatsRunner((documents, requestOptions) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, requestOptions, Context.NONE));
    });
}
/**
 * Verifies that statistics and sentence-level opinions are both absent from the
 * with-response collection result when AnalyzeSentimentOptions is null.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputWithNullAnalyzeSentimentOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        // Cast disambiguates the overload taking AnalyzeSentimentOptions; the runner-provided
        // options are intentionally ignored here.
        validateAnalyzeSentimentResultCollectionWithResponse(false, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, (AnalyzeSentimentOptions) null, Context.NONE));
    });
}
/**
 * Verifies that the with-response collection result contains request statistics but no
 * opinion-mining output when opinion mining is explicitly disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsExcludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, false, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options.setIncludeOpinionMining(false), Context.NONE));
    });
}
/**
 * Verifies that the with-response collection result contains opinion-mining output but no
 * request statistics when statistics are explicitly disabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputNotShowStatisticsButIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        options.setIncludeStatistics(false);
        validateAnalyzeSentimentResultCollectionWithResponse(false, true, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options, Context.NONE));
    });
}
/**
 * Verifies that the with-response collection result contains both opinion-mining output
 * and request statistics when both are enabled on the options.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentForBatchInputShowStatisticsAndIncludeOpinionMining(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchSentimentOpinionMining((documents, options) -> {
        validateAnalyzeSentimentResultCollectionWithResponse(true, true, getExpectedBatchTextSentiment(), 200,
            client.analyzeSentimentBatchWithResponse(documents, options, Context.NONE));
    });
}
/**
 * Verifies that exceeding the service's per-request document limit fails with HTTP 400
 * and an InvalidDocumentBatch error code.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentBatchTooManyDocuments(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    tooManyDocumentsRunner(inputs -> {
        final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
            () -> client.analyzeSentimentBatch(inputs, null, null).stream().findFirst().get());
        // Named constant instead of the literal 400, consistent with analyzeSentimentDuplicateIdInput.
        assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, httpResponseException.getResponse().getStatusCode());
        final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
        assertEquals(INVALID_DOCUMENT_BATCH, textAnalyticsError.getErrorCode());
    });
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sentence/opinion offset assertions for a document containing an emoji.
public void analyzeSentimentEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(document -> {
        final AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
        client.analyzeSentiment(document, null, options).getSentences().forEach(sentence -> {
            assertEquals(25, sentence.getLength());
            assertEquals(0, sentence.getOffset());
            sentence.getOpinions().forEach(opinion -> {
                opinion.getAssessments().forEach(assessment -> {
                    assertEquals(7, assessment.getLength());
                    assertEquals(17, assessment.getOffset());
                });
                final TargetSentiment target = opinion.getTarget();
                assertEquals(5, target.getLength());
                assertEquals(7, target.getOffset());
            });
        });
    }, SENTIMENT_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sentence/opinion offset assertions for an emoji with a skin-tone modifier.
public void analyzeSentimentEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(document -> {
        final AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
        client.analyzeSentiment(document, null, options).getSentences().forEach(sentence -> {
            assertEquals(27, sentence.getLength());
            assertEquals(0, sentence.getOffset());
            sentence.getOpinions().forEach(opinion -> {
                opinion.getAssessments().forEach(assessment -> {
                    assertEquals(7, assessment.getLength());
                    assertEquals(19, assessment.getOffset());
                });
                final TargetSentiment target = opinion.getTarget();
                assertEquals(5, target.getLength());
                assertEquals(9, target.getOffset());
            });
        });
    }, SENTIMENT_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sentence/opinion offset assertions for an emoji-family (multi-code-point) sequence.
public void analyzeSentimentEmojiFamily(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(document -> {
        final AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
        client.analyzeSentiment(document, null, options).getSentences().forEach(sentence -> {
            assertEquals(34, sentence.getLength());
            assertEquals(0, sentence.getOffset());
            sentence.getOpinions().forEach(opinion -> {
                opinion.getAssessments().forEach(assessment -> {
                    assertEquals(7, assessment.getLength());
                    assertEquals(26, assessment.getOffset());
                });
                final TargetSentiment target = opinion.getTarget();
                assertEquals(5, target.getLength());
                assertEquals(16, target.getOffset());
            });
        });
    }, SENTIMENT_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sentence/opinion offset assertions for an emoji family with skin-tone modifiers.
public void analyzeSentimentEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(document -> {
        final AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
        client.analyzeSentiment(document, null, options).getSentences().forEach(sentence -> {
            assertEquals(42, sentence.getLength());
            assertEquals(0, sentence.getOffset());
            sentence.getOpinions().forEach(opinion -> {
                opinion.getAssessments().forEach(assessment -> {
                    assertEquals(7, assessment.getLength());
                    assertEquals(34, assessment.getOffset());
                });
                final TargetSentiment target = opinion.getTarget();
                assertEquals(5, target.getLength());
                assertEquals(24, target.getOffset());
            });
        });
    }, SENTIMENT_OFFSET_INPUT);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sentence/opinion offset assertions for NFC-normalized diacritics.
public void analyzeSentimentDiacriticsNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(document -> {
        final AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
        client.analyzeSentiment(document, null, options).getSentences().forEach(sentence -> {
            assertEquals(26, sentence.getLength());
            assertEquals(0, sentence.getOffset());
            sentence.getOpinions().forEach(opinion -> {
                opinion.getAssessments().forEach(assessment -> {
                    assertEquals(7, assessment.getLength());
                    assertEquals(18, assessment.getOffset());
                });
                final TargetSentiment target = opinion.getTarget();
                assertEquals(5, target.getLength());
                assertEquals(8, target.getOffset());
            });
        });
    }, SENTIMENT_OFFSET_INPUT);
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Sentence/opinion offset assertions for NFD-normalized diacritics.
public void analyzeSentimentDiacriticsNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(document -> {
        final AnalyzeSentimentOptions options = new AnalyzeSentimentOptions().setIncludeOpinionMining(true);
        client.analyzeSentiment(document, null, options).getSentences().forEach(sentence -> {
            assertEquals(27, sentence.getLength());
            assertEquals(0, sentence.getOffset());
            sentence.getOpinions().forEach(opinion -> {
                opinion.getAssessments().forEach(assessment -> {
                    assertEquals(7, assessment.getLength());
                    assertEquals(19, assessment.getOffset());
                });
                final TargetSentiment target = opinion.getTarget();
                assertEquals(5, target.getLength());
                assertEquals(9, target.getOffset());
            });
        });
    }, SENTIMENT_OFFSET_INPUT);
}
// Verifies sentiment-analysis offsets for a document containing NFC-composed
// Korean text: sentence length 25, assessment offset 17, target offset 7.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(
        document ->
            client.analyzeSentiment(document, null,
                new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
                .getSentences()
                .forEach(sentenceSentiment -> {
                    assertEquals(25, sentenceSentiment.getLength());
                    assertEquals(0, sentenceSentiment.getOffset());
                    sentenceSentiment.getOpinions().forEach(opinion -> {
                        opinion.getAssessments().forEach(assessmentSentiment -> {
                            assertEquals(7, assessmentSentiment.getLength());
                            assertEquals(17, assessmentSentiment.getOffset());
                        });
                        final TargetSentiment targetSentiment = opinion.getTarget();
                        assertEquals(5, targetSentiment.getLength());
                        assertEquals(7, targetSentiment.getOffset());
                    });
                }),
        SENTIMENT_OFFSET_INPUT
    );
}
// NFD-decomposed Korean variant; expected offsets match the NFC case here
// (sentence length 25, assessment offset 17, target offset 7).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(
        document ->
            client.analyzeSentiment(document, null, new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
                .getSentences()
                .forEach(sentenceSentiment -> {
                    assertEquals(25, sentenceSentiment.getLength());
                    assertEquals(0, sentenceSentiment.getOffset());
                    sentenceSentiment.getOpinions().forEach(opinion -> {
                        opinion.getAssessments().forEach(assessmentSentiment -> {
                            assertEquals(7, assessmentSentiment.getLength());
                            assertEquals(17, assessmentSentiment.getOffset());
                        });
                        final TargetSentiment targetSentiment = opinion.getTarget();
                        assertEquals(5, targetSentiment.getLength());
                        assertEquals(7, targetSentiment.getOffset());
                    });
                }),
        SENTIMENT_OFFSET_INPUT
    );
}
// Verifies sentiment-analysis offsets for "zalgo" text (heavy combining-mark
// stacking): sentence length 138, assessment offset 130, target offset 120.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(
        document ->
            client.analyzeSentiment(document, null,
                new AnalyzeSentimentOptions().setIncludeOpinionMining(true))
                .getSentences()
                .forEach(sentenceSentiment -> {
                    assertEquals(138, sentenceSentiment.getLength());
                    assertEquals(0, sentenceSentiment.getOffset());
                    sentenceSentiment.getOpinions().forEach(opinion -> {
                        opinion.getAssessments().forEach(assessmentSentiment -> {
                            assertEquals(7, assessmentSentiment.getLength());
                            assertEquals(130, assessmentSentiment.getOffset());
                        });
                        final TargetSentiment targetSentiment = opinion.getTarget();
                        assertEquals(5, targetSentiment.getLength());
                        assertEquals(120, targetSentiment.getOffset());
                    });
                }),
        SENTIMENT_OFFSET_INPUT
    );
}
// Runs the healthcare-entities long-running operation over String inputs with
// no options, waits for completion, and validates the single-page result
// collection against the expected fixture (statistics disabled).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithoutOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareStringInputRunner((documents, dummyOptions) -> {
        // dummyOptions is intentionally ignored: this overload takes no options.
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            false,
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
    });
}
// Same as healthcareStringInputWithoutOptions but passes options and a language.
// Also sets and verifies the operation displayName, which is only supported on
// API versions newer than V3_0/V3_1.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareStringInputWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareStringInputRunner((documents, options) -> {
        // displayName is not available on the two oldest service versions.
        boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
            && serviceVersion != TextAnalyticsServiceVersion.V3_1;
        if (isValidApiVersionForDisplayName) {
            options.setDisplayName("operationName");
        }
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
        if (isValidApiVersionForDisplayName) {
            // The service must echo the displayName back in the operation detail.
            assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        }
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            options.isIncludeStatistics(),
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
    });
}
// Exercises the maximal beginAnalyzeHealthcareEntities overload (documents,
// options, Context) with TextDocumentInput documents, including the
// displayName round-trip check on supported API versions.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareLroRunner((documents, options) -> {
        // displayName is not available on the two oldest service versions.
        boolean isValidApiVersionForDisplayName = serviceVersion != TextAnalyticsServiceVersion.V3_0
            && serviceVersion != TextAnalyticsServiceVersion.V3_1;
        if (isValidApiVersionForDisplayName) {
            options.setDisplayName("operationName");
        }
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        PollResponse<AnalyzeHealthcareEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
        if (isValidApiVersionForDisplayName) {
            assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
        }
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            options.isIncludeStatistics(),
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForSinglePage(),
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
    });
}
// Submits 10 documents (runner's second argument) and validates that the
// paged-iterable result matches the expected multi-page fixture.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    healthcareLroPaginationRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        validateAnalyzeHealthcareEntitiesResultCollectionList(
            options.isIncludeStatistics(),
            getExpectedAnalyzeHealthcareEntitiesResultCollectionListForMultiplePages(0, 10, 0),
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList()));
    }, 10);
}
// Verifies that starting the healthcare LRO with an empty document list fails
// fast with IllegalArgumentException carrying the expected message.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void healthcareLroEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyListRunner((documents, errorMessage) -> {
        final IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
            () -> client.beginAnalyzeHealthcareEntities(documents, null, Context.NONE).getFinalResult());
        assertEquals(errorMessage, exception.getMessage());
    });
}
// Disabled placeholder: the body only creates the client and asserts nothing.
// NOTE(review): the @Disabled URL string is truncated in this copy — restore
// the original tracking-issue link before relying on this annotation.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
@Disabled("https:
public void analyzeHealthcareEntitiesEmojiUnicodeCodePoint(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
}
// Verifies healthcare-entity offsets for HEALTHCARE_ENTITY_OFFSET_INPUT when
// the document contains a plain emoji: every entity has length 11, offset 20.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmoji(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(20, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Emoji + skin-tone modifier variant: the modifier adds two code units, so the
// expected entity offset shifts to 22 (length stays 11).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiWithSkinToneModifierRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(22, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Emoji-family (ZWJ sequence) variant: expected entity offset is 29.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamily(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(29, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Emoji-family with skin-tone modifiers: longest sequence, expected offset 37.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesEmojiFamilyWithSkinToneModifier(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emojiFamilyWithSkinToneModifierRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(37, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// NFC-composed diacritics variant for healthcare entities: expected offset 21.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfc(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfcRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(21, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// NFD-decomposed diacritics variant: one extra combining code unit shifts the
// expected offset to 22 versus the NFC case.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesDiacriticsNfd(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    diacriticsNfdRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(22, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// NFC-composed Korean variant for healthcare entities: expected offset 20.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfc(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfcRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(20, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// NFD-decomposed Korean variant; expected offsets match the NFC case (20).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesKoreanNfd(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    koreanNfdRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(20, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Zalgo-text variant for healthcare entities: expected offset 133.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesZalgoText(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    zalgoTextRunner(
        document -> {
            SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
                syncPoller = client.beginAnalyzeHealthcareEntities(
                    Collections.singletonList(new TextDocumentInput("0", document)), null, Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
            analyzeHealthcareEntitiesPagedIterable.forEach(
                result -> result.forEach(entitiesResult -> entitiesResult.getEntities().forEach(
                    entity -> {
                        assertEquals(11, entity.getLength());
                        assertEquals(133, entity.getOffset());
                    })));
        },
        HEALTHCARE_ENTITY_OFFSET_INPUT);
}
// Verifies assertion detection on healthcare entities: the second entity of the
// first document must be HYPOTHETICAL with no association and no certainty.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareEntitiesForAssertion(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeHealthcareEntitiesForAssertionRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, "en", options);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeHealthcareEntitiesPagedIterable analyzeHealthcareEntitiesPagedIterable = syncPoller.getFinalResult();
        // Drill down: first page -> first document -> second entity -> assertion.
        final HealthcareEntityAssertion assertion =
            analyzeHealthcareEntitiesPagedIterable.stream().collect(Collectors.toList())
                .get(0).stream().collect(Collectors.toList())
                .get(0).getEntities().stream().collect(Collectors.toList())
                .get(1)
                .getAssertion();
        assertEquals(EntityConditionality.HYPOTHETICAL, assertion.getConditionality());
        assertNull(assertion.getAssociation());
        assertNull(assertion.getCertainty());
    });
}
// Cancels a running healthcare LRO and polls until the operation reports
// USER_CANCELLED. NOTE(review): the while-loop polls with no delay or timeout —
// a status that never becomes USER_CANCELLED would spin forever; consider a
// bounded wait if this test is re-enabled.
@Disabled("Temporary disable it for green test")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void cancelHealthcareLro(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    cancelHealthcareLroRunner((documents, options) -> {
        SyncPoller<AnalyzeHealthcareEntitiesOperationDetail, AnalyzeHealthcareEntitiesPagedIterable>
            syncPoller = client.beginAnalyzeHealthcareEntities(documents, options, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.cancelOperation();
        LongRunningOperationStatus operationStatus = syncPoller.poll().getStatus();
        while (!LongRunningOperationStatus.USER_CANCELLED.equals(operationStatus)) {
            operationStatus = syncPoller.poll().getStatus();
        }
        syncPoller.waitForCompletion();
        Assertions.assertEquals(LongRunningOperationStatus.USER_CANCELLED, operationStatus);
    });
}
// NOTE(review): the annotation pair below is duplicated (two
// @ParameterizedTest/@MethodSource pairs on one method) — @ParameterizedTest is
// not repeatable, so this looks like a leftover from a removed test; confirm
// against the original file.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
// Runs beginAnalyzeActions with explicit AnalyzeActionsOptions (statistics off)
// and validates the combined per-action result list against fixtures: entities,
// PII entities, key phrases, and sentiment actions are expected; the remaining
// action slots are empty.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithOptions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks,
                new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizePiiEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getAnalyzeSentimentResultCollectionForActions(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Verifies that submitting two actions of each kind yields exactly two results
// per kind in every AnalyzeActionsResult.
// NOTE(review): the @Disabled URL string is truncated in this copy; also, this
// test passes a null Context (sibling tests use Context.NONE) — presumably
// equivalent, but confirm against the client's null handling.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithMultiSameKindActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeActionsWithMultiSameKindActionsRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, null, null);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(actionsResult -> {
            assertEquals(2, actionsResult.getRecognizeEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizePiiEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getRecognizeLinkedEntitiesResults().stream().count());
            assertEquals(2, actionsResult.getAnalyzeSentimentResults().stream().count());
            assertEquals(2, actionsResult.getExtractKeyPhrasesResults().stream().count());
        });
    });
}
// Verifies that a custom action name (CUSTOM_ACTION_NAME) set on each action is
// echoed back on the first result of each action kind.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsWithActionNames(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeActionsWithActionNamesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, null, Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
        actionsResults.forEach(actionsResult -> {
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizeEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getRecognizePiiEntitiesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getAnalyzeSentimentResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
            assertEquals(CUSTOM_ACTION_NAME, actionsResult.getExtractKeyPhrasesResults().stream()
                .collect(Collectors.toList()).get(0).getActionName());
        });
    });
}
// Submits 22 documents (runner's second argument) and validates the paged
// analyze-actions result against the multi-page fixture (pages of 20 + 2).
// NOTE(review): the @Disabled URL string is truncated in this copy — restore
// the original tracking-issue link before relying on this annotation.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsPagination(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeBatchActionsPaginationRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable>
            syncPoller = client.beginAnalyzeActions(
                documents, tasks, new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            getExpectedAnalyzeActionsResultListForMultiplePages(0, 20, 2),
            result.stream().collect(Collectors.toList()));
    }, 22);
}
// Verifies that beginAnalyzeActions with an empty document list fails fast with
// IllegalArgumentException carrying the expected message.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeActionsEmptyInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    emptyListRunner((documents, errorMessage) -> {
        final IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
            () -> client.beginAnalyzeActions(documents,
                new TextAnalyticsActions().setRecognizeEntitiesActions(new RecognizeEntitiesAction()),
                null, Context.NONE)
                .getFinalResult());
        assertEquals(errorMessage, exception.getMessage());
    });
}
// Runs analyze-actions with only an entity-recognition action and validates
// that the result carries the entities fixture in the first action slot and
// nothing in the other seven.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeEntitiesRecognitionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeEntitiesRecognitionRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(asList(getExpectedRecognizeEntitiesActionResult(false, null,
                        TIME_NOW, getRecognizeEntitiesResultCollection(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.stream().collect(Collectors.toList()));
        }
    );
}
// Runs a PII-recognition action configured with category filters and validates
// the filtered-PII fixture appears in the PII action slot only.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithCategoriesFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithCategoriesFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForCategoriesFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.stream().collect(Collectors.toList()));
        }
    );
}
// Runs a PII-recognition action configured with a domain filter and validates
// the domain-filtered fixture appears in the PII action slot only.
// NOTE(review): the @Disabled URL string is truncated in this copy — restore
// the original tracking-issue link before relying on this annotation.
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzePiiEntityRecognitionWithDomainFilters(HttpClient httpClient,
    TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzePiiEntityRecognitionWithDomainFiltersRunner(
        (documents, tasks) -> {
            SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
                client.beginAnalyzeActions(documents, tasks,
                    new AnalyzeActionsOptions().setIncludeStatistics(false), Context.NONE);
            syncPoller = setPollInterval(syncPoller);
            syncPoller.waitForCompletion();
            AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
            validateAnalyzeBatchActionsResultList(false, false,
                Arrays.asList(getExpectedAnalyzeBatchActionsResult(
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(asList(getExpectedRecognizePiiEntitiesActionResult(false, null,
                        TIME_NOW, getExpectedBatchPiiEntitiesForDomainFilter(), null))),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null),
                    IterableStream.of(null)
                )),
                result.stream().collect(Collectors.toList()));
        }
    );
}
// Runs analyze-actions with only a linked-entity-recognition action (language
// "en") and validates the linked-entities fixture in its slot. Currently
// disabled: "Linked entity action do not work".
@Disabled("Linked entity action do not work")
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeLinkedEntityActions(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeLinkedEntityRecognitionRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedRecognizeLinkedEntitiesActionResult(false, null,
                    TIME_NOW, getRecognizeLinkedEntitiesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Runs analyze-actions with only a key-phrase-extraction action (language "en")
// and validates the key-phrases fixture in its slot; all other slots are empty.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeKeyPhrasesExtractionAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    extractKeyPhrasesRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedExtractKeyPhrasesActionResult(false, null,
                    TIME_NOW, getExtractKeyPhrasesResultCollection(), null))),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
// Runs analyze-actions with only a sentiment-analysis action (language "en")
// and validates the sentiment fixture in its slot; all other slots are empty.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeSentimentAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
    client = getTextAnalyticsClient(httpClient, serviceVersion, false);
    analyzeSentimentRunner((documents, tasks) -> {
        SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
            client.beginAnalyzeActions(documents, tasks, "en",
                new AnalyzeActionsOptions().setIncludeStatistics(false));
        syncPoller = setPollInterval(syncPoller);
        syncPoller.waitForCompletion();
        AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
        validateAnalyzeBatchActionsResultList(false, false,
            asList(getExpectedAnalyzeBatchActionsResult(
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(null),
                IterableStream.of(asList(getExpectedAnalyzeSentimentActionResult(false, null,
                    TIME_NOW, getExpectedBatchTextSentiment(), null))),
                IterableStream.of(null),
                IterableStream.of(null)
            )),
            result.stream().collect(Collectors.toList()));
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeHealthcareAction(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
analyzeHealthcareEntitiesRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExpectedAnalyzeHealthcareEntitiesActionResult(false, null, TIME_NOW,
getExpectedAnalyzeHealthcareEntitiesResultCollection(2,
asList(
getRecognizeHealthcareEntitiesResult1("0"),
getRecognizeHealthcareEntitiesResult2())),
null))),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getRecognizeCustomEntitiesResults().forEach(
customEntitiesActionResult -> customEntitiesActionResult.getDocumentsResults().forEach(
documentResult -> validateCategorizedEntities(
documentResult.getEntities().stream().collect(Collectors.toList())))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getSingleLabelClassifyResults().forEach(
customSingleCategoryActionResult -> customSingleCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiCategoryClassifyAction(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiCategoryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", null);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getMultiLabelClassifyResults().forEach(
customMultiCategoryActionResult -> customMultiCategoryActionResult.getDocumentsResults().forEach(
documentResult -> validateLabelClassificationResult(documentResult))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntitiesStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesRunner((documents, parameters) -> {
SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult ->
validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void recognizeCustomEntities(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
recognizeCustomEntitiesRunner((documents, parameters) -> {
RecognizeCustomEntitiesOptions options = new RecognizeCustomEntitiesOptions()
.setDisplayName("operationName");
SyncPoller<RecognizeCustomEntitiesOperationDetail, RecognizeCustomEntitiesPagedIterable> syncPoller =
client.beginRecognizeCustomEntities(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<RecognizeCustomEntitiesOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
RecognizeCustomEntitiesPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult ->
validateCategorizedEntities(documentResult.getEntities().stream().collect(Collectors.toList()))));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void singleLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomSingleLabelRunner((documents, parameters) -> {
SingleLabelClassifyOptions options = new SingleLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginSingleLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassificationStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1));
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void multiLabelClassification(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, true);
classifyCustomMultiLabelRunner((documents, parameters) -> {
MultiLabelClassifyOptions options = new MultiLabelClassifyOptions().setDisplayName("operationName");
SyncPoller<ClassifyDocumentOperationDetail, ClassifyDocumentPagedIterable> syncPoller =
client.beginMultiLabelClassify(documents, parameters.get(0), parameters.get(1), "en", options);
syncPoller = setPollInterval(syncPoller);
PollResponse<ClassifyDocumentOperationDetail> pollResponse = syncPoller.waitForCompletion();
assertEquals(options.getDisplayName(), pollResponse.getValue().getDisplayName());
ClassifyDocumentPagedIterable pagedIterable = syncPoller.getFinalResult();
pagedIterable.forEach(resultCollection ->
resultCollection.forEach(documentResult -> validateLabelClassificationResult(documentResult)));
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getExtractiveSummaryActionResult(false, null,
TIME_NOW,
getExpectedExtractiveSummaryResultCollection(getExpectedExtractiveSummaryResultSortByOffset()),
null))),
IterableStream.of(null)
)),
result.stream().collect(Collectors.toList()));
}, null, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isAscendingOrderByOffSet(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionSortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList()))))));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithSentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20))));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionWithNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
final List<AnalyzeActionsResult> actionsResults = result.stream().collect(Collectors.toList());
actionsResults.forEach(
actionsResult -> actionsResult.getExtractiveSummaryResults().forEach(
extractiveSummaryActionResult -> extractiveSummaryActionResult.getDocumentsResults().forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5))));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeExtractSummaryActionMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryActionRunner(
(documents, tasks) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
});
assertEquals(
INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void analyzeAbstractiveSummaryActionWithDefaultParameterValues(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryActionRunner((documents, tasks) -> {
SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller =
client.beginAnalyzeActions(documents, tasks, "en", new AnalyzeActionsOptions());
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AnalyzeActionsResultPagedIterable result = syncPoller.getFinalResult();
validateAnalyzeBatchActionsResultList(false, false,
asList(getExpectedAnalyzeBatchActionsResult(
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(null),
IterableStream.of(asList(getAbstractiveSummaryActionResult(false, null,
TIME_NOW,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
null
)))
)),
result.stream().collect(Collectors.toList()));
}, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryDuplicateIdInput(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
duplicateIdRunner(inputs -> {
final HttpResponseException response = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, Context.NONE));
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, response.getResponse().getStatusCode());
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryEmptyIdInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
emptyDocumentIdRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, Context.NONE));
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_DOCUMENT, textAnalyticsError.getErrorCode());
});
}
@Disabled("https:
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryTooManyDocuments(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
tooManyDocumentsRunner(inputs -> {
final HttpResponseException httpResponseException = assertThrows(HttpResponseException.class,
() -> client.beginAbstractSummary(inputs, null, null).getFinalResult());
assertEquals(400, httpResponseException.getResponse().getStatusCode());
final TextAnalyticsError textAnalyticsError = (TextAnalyticsError) httpResponseException.getValue();
assertEquals(INVALID_PARAMETER_VALUE, textAnalyticsError.getErrorCode());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryStringInput(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
client.beginAbstractSummary(documents);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginAbstractSummaryMaxOverload(HttpClient httpClient, TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
abstractiveSummaryMaxOverloadRunner((documents, options) -> {
SyncPoller<AbstractiveSummaryOperationDetail, AbstractiveSummaryPagedIterable> syncPoller =
client.beginAbstractSummary(documents, options, Context.NONE);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
AbstractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResult -> validateAbstractiveSummaryResultCollection(false,
new AbstractiveSummaryResultCollection(asList(getExpectedAbstractiveSummaryResult())),
documentResult));
}, 4);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByOffset(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isAscendingOrderByOffSet(documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.OFFSET);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySortedByRankScore(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
isDescendingOrderByRankScore(
documentResult.getSentences().stream().collect(Collectors.toList())))
));
}, 4, ExtractiveSummarySentencesOrder.RANK);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummarySentenceCountLessThanMaxCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertTrue(
documentResult.getSentences().stream().collect(Collectors.toList()).size() < 20)));
}, 20, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryNonDefaultSentenceCount(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
extractiveSummaryRunner((documents, options) -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
result.stream().collect(Collectors.toList()).forEach(
documentResultCollection -> documentResultCollection.forEach(
documentResult -> assertEquals(
documentResult.getSentences().stream().collect(Collectors.toList()).size(), 5)));
}, 5, null);
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("com.azure.ai.textanalytics.TestUtils
public void beginExtractSummaryMaxSentenceCountInvalidRangeException(HttpClient httpClient,
TextAnalyticsServiceVersion serviceVersion) {
client = getTextAnalyticsClient(httpClient, serviceVersion, false);
int[] invalidMaxSentenceCounts = {0, 21};
for (int invalidCount: invalidMaxSentenceCounts) {
extractiveSummaryRunner(
(documents, options) -> {
HttpResponseException exception = assertThrows(HttpResponseException.class, () -> {
SyncPoller<ExtractiveSummaryOperationDetail, ExtractiveSummaryPagedIterable> syncPoller =
client.beginExtractSummary(documents, "en", options);
syncPoller = setPollInterval(syncPoller);
syncPoller.waitForCompletion();
ExtractiveSummaryPagedIterable result = syncPoller.getFinalResult();
});
assertEquals(
INVALID_PARAMETER_VALUE,
((TextAnalyticsError) exception.getValue()).getErrorCode());
}, invalidCount, null);
}
}
} |
When node count is 0, it represents a single node configuration with the ability to create distributed tables on that node. 2 or more worker nodes represent multi-node configuration. Node count value cannot be 1. Required for creation | public void testCreateCluster() {
Cluster cluster = null;
String randomPadding = randomPadding();
try {
String clusterName = "cluster" + randomPadding;
String adminPwd = "Pass@" + randomPadding;
cluster = cosmosDBForPostgreSqlManager
.clusters()
.define(clusterName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAdministratorLoginPassword(adminPwd)
.withPostgresqlVersion("15")
.withCitusVersion("12.1")
.withMaintenanceWindow(new MaintenanceWindow()
.withCustomWindow("Disabled")
.withDayOfWeek(0)
.withStartHour(0)
.withStartMinute(0))
.withEnableShardsOnCoordinator(true)
.withEnableHa(false)
.withCoordinatorServerEdition("GeneralPurpose")
.withNodeServerEdition("MemoryOptimized")
.withCoordinatorStorageQuotaInMb(131072)
.withNodeStorageQuotaInMb(524288)
.withCoordinatorVCores(2)
.withNodeVCores(4)
.withCoordinatorEnablePublicIpAccess(true)
.withNodeEnablePublicIpAccess(true)
.withNodeCount(0)
.create();
cluster.refresh();
Assertions.assertEquals(cluster.name(), clusterName);
Assertions.assertEquals(cluster.name(), cosmosDBForPostgreSqlManager.clusters().getById(cluster.id()).name());
Assertions.assertTrue(cosmosDBForPostgreSqlManager.clusters().list().stream().findAny().isPresent());
} finally {
if (cluster != null) {
cosmosDBForPostgreSqlManager.clusters().deleteById(cluster.id());
}
}
} | .withNodeCount(0) | public void testCreateCluster() {
Cluster cluster = null;
String randomPadding = randomPadding();
try {
String clusterName = "cluster" + randomPadding;
String adminPwd = "Pass@" + randomPadding;
cluster = cosmosDBForPostgreSqlManager
.clusters()
.define(clusterName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAdministratorLoginPassword(adminPwd)
.withPostgresqlVersion("15")
.withCitusVersion("12.1")
.withMaintenanceWindow(new MaintenanceWindow()
.withCustomWindow("Disabled")
.withDayOfWeek(0)
.withStartHour(0)
.withStartMinute(0))
.withEnableShardsOnCoordinator(true)
.withEnableHa(false)
.withCoordinatorServerEdition("GeneralPurpose")
.withNodeServerEdition("MemoryOptimized")
.withCoordinatorStorageQuotaInMb(131072)
.withNodeStorageQuotaInMb(524288)
.withCoordinatorVCores(2)
.withNodeVCores(4)
.withCoordinatorEnablePublicIpAccess(true)
.withNodeEnablePublicIpAccess(true)
.withNodeCount(0)
.create();
cluster.refresh();
Assertions.assertEquals(cluster.name(), clusterName);
Assertions.assertEquals(cluster.name(), cosmosDBForPostgreSqlManager.clusters().getById(cluster.id()).name());
Assertions.assertTrue(cosmosDBForPostgreSqlManager.clusters().list().stream().findAny().isPresent());
} finally {
if (cluster != null) {
cosmosDBForPostgreSqlManager.clusters().deleteById(cluster.id());
}
}
} | class CosmosDBForPostgreSqlManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private CosmosDBForPostgreSqlManager cosmosDBForPostgreSqlManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
cosmosDBForPostgreSqlManager = CosmosDBForPostgreSqlManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class CosmosDBForPostgreSqlManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private CosmosDBForPostgreSqlManager cosmosDBForPostgreSqlManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
cosmosDBForPostgreSqlManager = CosmosDBForPostgreSqlManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
consider: adding a quirks mode to fall-back to the in-house MI implementation in case of support incidents (to unblock customers), which can later be removed after we have full confidence in MSAL's new MI support. | public Mono<AccessToken> authenticate(TokenRequestContext request) {
if (getClientId() != null) {
return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException(
"User assigned identity is not supported by the Azure Arc Managed Identity Endpoint. To authenticate "
+ "with the system assigned identity omit the client id when constructing the"
+ " ManagedIdentityCredential.", null)));
}
return identityClient.authenticateWithManagedIdentityMsalClient(request);
} | return identityClient.authenticateWithManagedIdentityMsalClient(request); | public Mono<AccessToken> authenticate(TokenRequestContext request) {
if (getClientId() != null) {
return Mono.error(LOGGER.logExceptionAsError(new ClientAuthenticationException(
"User assigned identity is not supported by the Azure Arc Managed Identity Endpoint. To authenticate "
+ "with the system assigned identity omit the client id when constructing the"
+ " ManagedIdentityCredential.", null)));
}
return identityClient.authenticateWithManagedIdentityMsalClient(request);
} | class ArcIdentityCredential extends ManagedIdentityServiceCredential {
private static final ClientLogger LOGGER = new ClientLogger(ArcIdentityCredential.class);
private final String identityEndpoint;
/**
* Creates an instance of {@link ArcIdentityCredential}.
*
* @param clientId The client ID of user assigned or system assigned identity.
* @param identityClient The identity client to acquire a token with.
*/
ArcIdentityCredential(String clientId, IdentityClient identityClient) {
super(clientId, identityClient, "AZURE ARC IDENTITY ENDPOINT");
Configuration configuration = Configuration.getGlobalConfiguration().clone();
this.identityEndpoint = configuration.get(Configuration.PROPERTY_IDENTITY_ENDPOINT);
if (identityEndpoint != null) {
validateEndpointProtocol(this.identityEndpoint, "Identity", LOGGER);
}
}
/**
* Gets an access token for a token request.
*
* @param request The details of the token request.
* @return A publisher that emits an {@link AccessToken}.
*/
} | class ArcIdentityCredential extends ManagedIdentityServiceCredential {
private static final ClientLogger LOGGER = new ClientLogger(ArcIdentityCredential.class);
private final String identityEndpoint;
/**
* Creates an instance of {@link ArcIdentityCredential}.
*
* @param clientId The client ID of user assigned or system assigned identity.
* @param identityClient The identity client to acquire a token with.
*/
ArcIdentityCredential(String clientId, IdentityClient identityClient) {
super(clientId, identityClient, "AZURE ARC IDENTITY ENDPOINT");
Configuration configuration = Configuration.getGlobalConfiguration().clone();
this.identityEndpoint = configuration.get(Configuration.PROPERTY_IDENTITY_ENDPOINT);
if (identityEndpoint != null) {
validateEndpointProtocol(this.identityEndpoint, "Identity", LOGGER);
}
}
/**
* Gets an access token for a token request.
*
* @param request The details of the token request.
* @return A publisher that emits an {@link AccessToken}.
*/
} |
What does nodeCount=0 mean? | public void testCreateCluster() {
Cluster cluster = null;
String randomPadding = randomPadding();
try {
String clusterName = "cluster" + randomPadding;
String adminPwd = "Pass@" + randomPadding;
cluster = cosmosDBForPostgreSqlManager
.clusters()
.define(clusterName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAdministratorLoginPassword(adminPwd)
.withPostgresqlVersion("15")
.withCitusVersion("12.1")
.withMaintenanceWindow(new MaintenanceWindow()
.withCustomWindow("Disabled")
.withDayOfWeek(0)
.withStartHour(0)
.withStartMinute(0))
.withEnableShardsOnCoordinator(true)
.withEnableHa(false)
.withCoordinatorServerEdition("GeneralPurpose")
.withNodeServerEdition("MemoryOptimized")
.withCoordinatorStorageQuotaInMb(131072)
.withNodeStorageQuotaInMb(524288)
.withCoordinatorVCores(2)
.withNodeVCores(4)
.withCoordinatorEnablePublicIpAccess(true)
.withNodeEnablePublicIpAccess(true)
.withNodeCount(0)
.create();
cluster.refresh();
Assertions.assertEquals(cluster.name(), clusterName);
Assertions.assertEquals(cluster.name(), cosmosDBForPostgreSqlManager.clusters().getById(cluster.id()).name());
Assertions.assertTrue(cosmosDBForPostgreSqlManager.clusters().list().stream().findAny().isPresent());
} finally {
if (cluster != null) {
cosmosDBForPostgreSqlManager.clusters().deleteById(cluster.id());
}
}
} | .withNodeCount(0) | public void testCreateCluster() {
Cluster cluster = null;
String randomPadding = randomPadding();
try {
String clusterName = "cluster" + randomPadding;
String adminPwd = "Pass@" + randomPadding;
cluster = cosmosDBForPostgreSqlManager
.clusters()
.define(clusterName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAdministratorLoginPassword(adminPwd)
.withPostgresqlVersion("15")
.withCitusVersion("12.1")
.withMaintenanceWindow(new MaintenanceWindow()
.withCustomWindow("Disabled")
.withDayOfWeek(0)
.withStartHour(0)
.withStartMinute(0))
.withEnableShardsOnCoordinator(true)
.withEnableHa(false)
.withCoordinatorServerEdition("GeneralPurpose")
.withNodeServerEdition("MemoryOptimized")
.withCoordinatorStorageQuotaInMb(131072)
.withNodeStorageQuotaInMb(524288)
.withCoordinatorVCores(2)
.withNodeVCores(4)
.withCoordinatorEnablePublicIpAccess(true)
.withNodeEnablePublicIpAccess(true)
.withNodeCount(0)
.create();
cluster.refresh();
Assertions.assertEquals(cluster.name(), clusterName);
Assertions.assertEquals(cluster.name(), cosmosDBForPostgreSqlManager.clusters().getById(cluster.id()).name());
Assertions.assertTrue(cosmosDBForPostgreSqlManager.clusters().list().stream().findAny().isPresent());
} finally {
if (cluster != null) {
cosmosDBForPostgreSqlManager.clusters().deleteById(cluster.id());
}
}
} | class CosmosDBForPostgreSqlManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private CosmosDBForPostgreSqlManager cosmosDBForPostgreSqlManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
cosmosDBForPostgreSqlManager = CosmosDBForPostgreSqlManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class CosmosDBForPostgreSqlManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private CosmosDBForPostgreSqlManager cosmosDBForPostgreSqlManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
cosmosDBForPostgreSqlManager = CosmosDBForPostgreSqlManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Got it, thanks. | public void testCreateCluster() {
Cluster cluster = null;
String randomPadding = randomPadding();
try {
String clusterName = "cluster" + randomPadding;
String adminPwd = "Pass@" + randomPadding;
cluster = cosmosDBForPostgreSqlManager
.clusters()
.define(clusterName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAdministratorLoginPassword(adminPwd)
.withPostgresqlVersion("15")
.withCitusVersion("12.1")
.withMaintenanceWindow(new MaintenanceWindow()
.withCustomWindow("Disabled")
.withDayOfWeek(0)
.withStartHour(0)
.withStartMinute(0))
.withEnableShardsOnCoordinator(true)
.withEnableHa(false)
.withCoordinatorServerEdition("GeneralPurpose")
.withNodeServerEdition("MemoryOptimized")
.withCoordinatorStorageQuotaInMb(131072)
.withNodeStorageQuotaInMb(524288)
.withCoordinatorVCores(2)
.withNodeVCores(4)
.withCoordinatorEnablePublicIpAccess(true)
.withNodeEnablePublicIpAccess(true)
.withNodeCount(0)
.create();
cluster.refresh();
Assertions.assertEquals(cluster.name(), clusterName);
Assertions.assertEquals(cluster.name(), cosmosDBForPostgreSqlManager.clusters().getById(cluster.id()).name());
Assertions.assertTrue(cosmosDBForPostgreSqlManager.clusters().list().stream().findAny().isPresent());
} finally {
if (cluster != null) {
cosmosDBForPostgreSqlManager.clusters().deleteById(cluster.id());
}
}
} | .withNodeCount(0) | public void testCreateCluster() {
Cluster cluster = null;
String randomPadding = randomPadding();
try {
String clusterName = "cluster" + randomPadding;
String adminPwd = "Pass@" + randomPadding;
cluster = cosmosDBForPostgreSqlManager
.clusters()
.define(clusterName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withAdministratorLoginPassword(adminPwd)
.withPostgresqlVersion("15")
.withCitusVersion("12.1")
.withMaintenanceWindow(new MaintenanceWindow()
.withCustomWindow("Disabled")
.withDayOfWeek(0)
.withStartHour(0)
.withStartMinute(0))
.withEnableShardsOnCoordinator(true)
.withEnableHa(false)
.withCoordinatorServerEdition("GeneralPurpose")
.withNodeServerEdition("MemoryOptimized")
.withCoordinatorStorageQuotaInMb(131072)
.withNodeStorageQuotaInMb(524288)
.withCoordinatorVCores(2)
.withNodeVCores(4)
.withCoordinatorEnablePublicIpAccess(true)
.withNodeEnablePublicIpAccess(true)
.withNodeCount(0)
.create();
cluster.refresh();
Assertions.assertEquals(cluster.name(), clusterName);
Assertions.assertEquals(cluster.name(), cosmosDBForPostgreSqlManager.clusters().getById(cluster.id()).name());
Assertions.assertTrue(cosmosDBForPostgreSqlManager.clusters().list().stream().findAny().isPresent());
} finally {
if (cluster != null) {
cosmosDBForPostgreSqlManager.clusters().deleteById(cluster.id());
}
}
} | class CosmosDBForPostgreSqlManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private CosmosDBForPostgreSqlManager cosmosDBForPostgreSqlManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
cosmosDBForPostgreSqlManager = CosmosDBForPostgreSqlManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class CosmosDBForPostgreSqlManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private CosmosDBForPostgreSqlManager cosmosDBForPostgreSqlManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
cosmosDBForPostgreSqlManager = CosmosDBForPostgreSqlManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
This seems from a mismatch in TypeSpec source https://github.com/Azure/azure-rest-api-specs/blob/main/specification/batch/Azure.Batch/models.tsp#L1171-L1187 Here the type is int32 | public static FileProperties fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
OffsetDateTime lastModified = null;
long contentLength = 0L;
OffsetDateTime creationTime = null;
String contentType = null;
String fileMode = null;
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("lastModified".equals(fieldName)) {
lastModified = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentLength".equals(fieldName)) {
if (reader.currentToken() == JsonToken.STRING) {
String contentLengthStr = reader.getString();
try {
contentLength = Long.parseLong(contentLengthStr);
} catch (NumberFormatException e) {
throw new IOException("Expected numeric contentLength, but found: " + contentLengthStr, e);
}
} else if (reader.currentToken() == JsonToken.NUMBER) {
contentLength = reader.getLong();
} else {
throw new IOException("Expected contentLength to be a number or string, but found other type");
}
} else if ("creationTime".equals(fieldName)) {
creationTime = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentType".equals(fieldName)) {
contentType = reader.getString();
} else if ("fileMode".equals(fieldName)) {
fileMode = reader.getString();
} else {
reader.skipChildren();
}
}
FileProperties deserializedFileProperties = new FileProperties(lastModified, contentLength);
deserializedFileProperties.creationTime = creationTime;
deserializedFileProperties.contentType = contentType;
deserializedFileProperties.fileMode = fileMode;
return deserializedFileProperties;
});
} | } | public static FileProperties fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
OffsetDateTime lastModified = null;
long contentLength = 0L;
OffsetDateTime creationTime = null;
String contentType = null;
String fileMode = null;
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("lastModified".equals(fieldName)) {
lastModified = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentLength".equals(fieldName)) {
if (reader.currentToken() == JsonToken.STRING) {
String contentLengthStr = reader.getString();
try {
contentLength = Long.parseLong(contentLengthStr);
} catch (NumberFormatException e) {
throw new IOException("Expected numeric contentLength, but found: " + contentLengthStr, e);
}
} else if (reader.currentToken() == JsonToken.NUMBER) {
contentLength = reader.getLong();
} else {
throw new IOException("Expected contentLength to be a number or string, but found other type");
}
} else if ("creationTime".equals(fieldName)) {
creationTime = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentType".equals(fieldName)) {
contentType = reader.getString();
} else if ("fileMode".equals(fieldName)) {
fileMode = reader.getString();
} else {
reader.skipChildren();
}
}
FileProperties deserializedFileProperties = new FileProperties(lastModified, contentLength);
deserializedFileProperties.creationTime = creationTime;
deserializedFileProperties.contentType = contentType;
deserializedFileProperties.fileMode = fileMode;
return deserializedFileProperties;
});
} | class FileProperties implements JsonSerializable<FileProperties> {
/*
* The file creation time. The creation time is not returned for files on Linux Compute Nodes.
*/
@Generated
private OffsetDateTime creationTime;
/*
* The time at which the file was last modified.
*/
@Generated
private final OffsetDateTime lastModified;
/*
* The length of the file.
*/
@Generated
private final long contentLength;
/*
* The content type of the file.
*/
@Generated
private String contentType;
/*
* The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes.
*/
@Generated
private String fileMode;
/**
* Get the creationTime property: The file creation time. The creation time is not returned for files on Linux
* Compute Nodes.
*
* @return the creationTime value.
*/
@Generated
public OffsetDateTime getCreationTime() {
return this.creationTime;
}
/**
* Get the lastModified property: The time at which the file was last modified.
*
* @return the lastModified value.
*/
@Generated
public OffsetDateTime getLastModified() {
return this.lastModified;
}
/**
* Get the contentLength property: The length of the file.
*
* @return the contentLength value.
*/
@Generated
public long getContentLength() {
return this.contentLength;
}
/**
* Get the contentType property: The content type of the file.
*
* @return the contentType value.
*/
@Generated
public String getContentType() {
return this.contentType;
}
/**
* Get the fileMode property: The file mode attribute in octal format. The file mode is returned only for files on
* Linux Compute Nodes.
*
* @return the fileMode value.
*/
@Generated
public String getFileMode() {
return this.fileMode;
}
/**
* Creates an instance of FileProperties class.
*
* @param lastModified the lastModified value to set.
* @param contentLength the contentLength value to set.
*/
@Generated
private FileProperties(OffsetDateTime lastModified, long contentLength) {
this.lastModified = lastModified;
this.contentLength = contentLength;
}
/**
* {@inheritDoc}
*/
@Generated
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("lastModified",
this.lastModified == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModified));
jsonWriter.writeLongField("contentLength", this.contentLength);
jsonWriter.writeStringField("creationTime",
this.creationTime == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.creationTime));
jsonWriter.writeStringField("contentType", this.contentType);
jsonWriter.writeStringField("fileMode", this.fileMode);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of FileProperties from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of FileProperties if the JsonReader was pointing to an instance of it, or null if it was
* pointing to JSON null.
* @throws IllegalStateException If the deserialized JSON object was missing any required properties.
* @throws IOException If an error occurs while reading the FileProperties.
*/
} | class FileProperties implements JsonSerializable<FileProperties> {
/*
* The file creation time. The creation time is not returned for files on Linux Compute Nodes.
*/
@Generated
private OffsetDateTime creationTime;
/*
* The time at which the file was last modified.
*/
@Generated
private final OffsetDateTime lastModified;
/*
* The length of the file.
*/
@Generated
private final long contentLength;
/*
* The content type of the file.
*/
@Generated
private String contentType;
/*
* The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes.
*/
@Generated
private String fileMode;
/**
* Get the creationTime property: The file creation time. The creation time is not returned for files on Linux
* Compute Nodes.
*
* @return the creationTime value.
*/
@Generated
public OffsetDateTime getCreationTime() {
return this.creationTime;
}
/**
* Get the lastModified property: The time at which the file was last modified.
*
* @return the lastModified value.
*/
@Generated
public OffsetDateTime getLastModified() {
return this.lastModified;
}
/**
* Get the contentLength property: The length of the file.
*
* @return the contentLength value.
*/
@Generated
public long getContentLength() {
return this.contentLength;
}
/**
* Get the contentType property: The content type of the file.
*
* @return the contentType value.
*/
@Generated
public String getContentType() {
return this.contentType;
}
/**
* Get the fileMode property: The file mode attribute in octal format. The file mode is returned only for files on
* Linux Compute Nodes.
*
* @return the fileMode value.
*/
@Generated
public String getFileMode() {
return this.fileMode;
}
/**
* Creates an instance of FileProperties class.
*
* @param lastModified the lastModified value to set.
* @param contentLength the contentLength value to set.
*/
@Generated
private FileProperties(OffsetDateTime lastModified, long contentLength) {
this.lastModified = lastModified;
this.contentLength = contentLength;
}
/**
* {@inheritDoc}
*/
@Generated
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("lastModified",
this.lastModified == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModified));
jsonWriter.writeLongField("contentLength", this.contentLength);
jsonWriter.writeStringField("creationTime",
this.creationTime == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.creationTime));
jsonWriter.writeStringField("contentType", this.contentType);
jsonWriter.writeStringField("fileMode", this.fileMode);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of FileProperties from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of FileProperties if the JsonReader was pointing to an instance of it, or null if it was
* pointing to JSON null.
* @throws IllegalStateException If the deserialized JSON object was missing any required properties.
* @throws IOException If an error occurs while reading the FileProperties.
*/
} |
We actually found that this was a mistake in our TypeSpec. It is defined as a long (int64) in the service code and the management plane swagger, so we updated the TypeSpec to correctly define it as int64 and not int32: https://github.com/Azure/azure-rest-api-specs/blob/main/specification/batch/Azure.Batch/models.tsp#L1054 | public static FileProperties fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
OffsetDateTime lastModified = null;
long contentLength = 0L;
OffsetDateTime creationTime = null;
String contentType = null;
String fileMode = null;
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("lastModified".equals(fieldName)) {
lastModified = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentLength".equals(fieldName)) {
if (reader.currentToken() == JsonToken.STRING) {
String contentLengthStr = reader.getString();
try {
contentLength = Long.parseLong(contentLengthStr);
} catch (NumberFormatException e) {
throw new IOException("Expected numeric contentLength, but found: " + contentLengthStr, e);
}
} else if (reader.currentToken() == JsonToken.NUMBER) {
contentLength = reader.getLong();
} else {
throw new IOException("Expected contentLength to be a number or string, but found other type");
}
} else if ("creationTime".equals(fieldName)) {
creationTime = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentType".equals(fieldName)) {
contentType = reader.getString();
} else if ("fileMode".equals(fieldName)) {
fileMode = reader.getString();
} else {
reader.skipChildren();
}
}
FileProperties deserializedFileProperties = new FileProperties(lastModified, contentLength);
deserializedFileProperties.creationTime = creationTime;
deserializedFileProperties.contentType = contentType;
deserializedFileProperties.fileMode = fileMode;
return deserializedFileProperties;
});
} | } | public static FileProperties fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
OffsetDateTime lastModified = null;
long contentLength = 0L;
OffsetDateTime creationTime = null;
String contentType = null;
String fileMode = null;
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("lastModified".equals(fieldName)) {
lastModified = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentLength".equals(fieldName)) {
if (reader.currentToken() == JsonToken.STRING) {
String contentLengthStr = reader.getString();
try {
contentLength = Long.parseLong(contentLengthStr);
} catch (NumberFormatException e) {
throw new IOException("Expected numeric contentLength, but found: " + contentLengthStr, e);
}
} else if (reader.currentToken() == JsonToken.NUMBER) {
contentLength = reader.getLong();
} else {
throw new IOException("Expected contentLength to be a number or string, but found other type");
}
} else if ("creationTime".equals(fieldName)) {
creationTime = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentType".equals(fieldName)) {
contentType = reader.getString();
} else if ("fileMode".equals(fieldName)) {
fileMode = reader.getString();
} else {
reader.skipChildren();
}
}
FileProperties deserializedFileProperties = new FileProperties(lastModified, contentLength);
deserializedFileProperties.creationTime = creationTime;
deserializedFileProperties.contentType = contentType;
deserializedFileProperties.fileMode = fileMode;
return deserializedFileProperties;
});
} | class FileProperties implements JsonSerializable<FileProperties> {
/*
* The file creation time. The creation time is not returned for files on Linux Compute Nodes.
*/
@Generated
private OffsetDateTime creationTime;
/*
* The time at which the file was last modified.
*/
@Generated
private final OffsetDateTime lastModified;
/*
* The length of the file.
*/
@Generated
private final long contentLength;
/*
* The content type of the file.
*/
@Generated
private String contentType;
/*
* The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes.
*/
@Generated
private String fileMode;
/**
* Get the creationTime property: The file creation time. The creation time is not returned for files on Linux
* Compute Nodes.
*
* @return the creationTime value.
*/
@Generated
public OffsetDateTime getCreationTime() {
return this.creationTime;
}
/**
* Get the lastModified property: The time at which the file was last modified.
*
* @return the lastModified value.
*/
@Generated
public OffsetDateTime getLastModified() {
return this.lastModified;
}
/**
* Get the contentLength property: The length of the file.
*
* @return the contentLength value.
*/
@Generated
public long getContentLength() {
return this.contentLength;
}
/**
* Get the contentType property: The content type of the file.
*
* @return the contentType value.
*/
@Generated
public String getContentType() {
return this.contentType;
}
/**
* Get the fileMode property: The file mode attribute in octal format. The file mode is returned only for files on
* Linux Compute Nodes.
*
* @return the fileMode value.
*/
@Generated
public String getFileMode() {
return this.fileMode;
}
/**
* Creates an instance of FileProperties class.
*
* @param lastModified the lastModified value to set.
* @param contentLength the contentLength value to set.
*/
@Generated
private FileProperties(OffsetDateTime lastModified, long contentLength) {
this.lastModified = lastModified;
this.contentLength = contentLength;
}
/**
* {@inheritDoc}
*/
@Generated
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("lastModified",
this.lastModified == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModified));
jsonWriter.writeLongField("contentLength", this.contentLength);
jsonWriter.writeStringField("creationTime",
this.creationTime == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.creationTime));
jsonWriter.writeStringField("contentType", this.contentType);
jsonWriter.writeStringField("fileMode", this.fileMode);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of FileProperties from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of FileProperties if the JsonReader was pointing to an instance of it, or null if it was
* pointing to JSON null.
* @throws IllegalStateException If the deserialized JSON object was missing any required properties.
* @throws IOException If an error occurs while reading the FileProperties.
*/
} | class FileProperties implements JsonSerializable<FileProperties> {
/*
* The file creation time. The creation time is not returned for files on Linux Compute Nodes.
*/
@Generated
private OffsetDateTime creationTime;
/*
* The time at which the file was last modified.
*/
@Generated
private final OffsetDateTime lastModified;
/*
* The length of the file.
*/
@Generated
private final long contentLength;
/*
* The content type of the file.
*/
@Generated
private String contentType;
/*
* The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes.
*/
@Generated
private String fileMode;
/**
* Get the creationTime property: The file creation time. The creation time is not returned for files on Linux
* Compute Nodes.
*
* @return the creationTime value.
*/
@Generated
public OffsetDateTime getCreationTime() {
return this.creationTime;
}
/**
* Get the lastModified property: The time at which the file was last modified.
*
* @return the lastModified value.
*/
@Generated
public OffsetDateTime getLastModified() {
return this.lastModified;
}
/**
* Get the contentLength property: The length of the file.
*
* @return the contentLength value.
*/
@Generated
public long getContentLength() {
return this.contentLength;
}
/**
* Get the contentType property: The content type of the file.
*
* @return the contentType value.
*/
@Generated
public String getContentType() {
return this.contentType;
}
/**
* Get the fileMode property: The file mode attribute in octal format. The file mode is returned only for files on
* Linux Compute Nodes.
*
* @return the fileMode value.
*/
@Generated
public String getFileMode() {
return this.fileMode;
}
/**
* Creates an instance of FileProperties class.
*
* @param lastModified the lastModified value to set.
* @param contentLength the contentLength value to set.
*/
@Generated
private FileProperties(OffsetDateTime lastModified, long contentLength) {
this.lastModified = lastModified;
this.contentLength = contentLength;
}
/**
* {@inheritDoc}
*/
@Generated
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("lastModified",
this.lastModified == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModified));
jsonWriter.writeLongField("contentLength", this.contentLength);
jsonWriter.writeStringField("creationTime",
this.creationTime == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.creationTime));
jsonWriter.writeStringField("contentType", this.contentType);
jsonWriter.writeStringField("fileMode", this.fileMode);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of FileProperties from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of FileProperties if the JsonReader was pointing to an instance of it, or null if it was
* pointing to JSON null.
* @throws IllegalStateException If the deserialized JSON object was missing any required properties.
* @throws IOException If an error occurs while reading the FileProperties.
*/
} |
Got it. However, the record looks like a JSON String, not a JSON Numeric https://github.com/Azure/azure-sdk-assets/blob/java/batch/azure-compute-batch_aff1c3044f/java/sdk/batch/azure-compute-batch/src/test/resources/session-records/FileTests.canReadFromTaskFile.json#L259-L283 Please double check the JSON on wire. | public static FileProperties fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
OffsetDateTime lastModified = null;
long contentLength = 0L;
OffsetDateTime creationTime = null;
String contentType = null;
String fileMode = null;
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("lastModified".equals(fieldName)) {
lastModified = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentLength".equals(fieldName)) {
if (reader.currentToken() == JsonToken.STRING) {
String contentLengthStr = reader.getString();
try {
contentLength = Long.parseLong(contentLengthStr);
} catch (NumberFormatException e) {
throw new IOException("Expected numeric contentLength, but found: " + contentLengthStr, e);
}
} else if (reader.currentToken() == JsonToken.NUMBER) {
contentLength = reader.getLong();
} else {
throw new IOException("Expected contentLength to be a number or string, but found other type");
}
} else if ("creationTime".equals(fieldName)) {
creationTime = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentType".equals(fieldName)) {
contentType = reader.getString();
} else if ("fileMode".equals(fieldName)) {
fileMode = reader.getString();
} else {
reader.skipChildren();
}
}
FileProperties deserializedFileProperties = new FileProperties(lastModified, contentLength);
deserializedFileProperties.creationTime = creationTime;
deserializedFileProperties.contentType = contentType;
deserializedFileProperties.fileMode = fileMode;
return deserializedFileProperties;
});
} | } | public static FileProperties fromJson(JsonReader jsonReader) throws IOException {
return jsonReader.readObject(reader -> {
OffsetDateTime lastModified = null;
long contentLength = 0L;
OffsetDateTime creationTime = null;
String contentType = null;
String fileMode = null;
while (reader.nextToken() != JsonToken.END_OBJECT) {
String fieldName = reader.getFieldName();
reader.nextToken();
if ("lastModified".equals(fieldName)) {
lastModified = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentLength".equals(fieldName)) {
if (reader.currentToken() == JsonToken.STRING) {
String contentLengthStr = reader.getString();
try {
contentLength = Long.parseLong(contentLengthStr);
} catch (NumberFormatException e) {
throw new IOException("Expected numeric contentLength, but found: " + contentLengthStr, e);
}
} else if (reader.currentToken() == JsonToken.NUMBER) {
contentLength = reader.getLong();
} else {
throw new IOException("Expected contentLength to be a number or string, but found other type");
}
} else if ("creationTime".equals(fieldName)) {
creationTime = reader.getNullable(nonNullReader -> OffsetDateTime.parse(nonNullReader.getString()));
} else if ("contentType".equals(fieldName)) {
contentType = reader.getString();
} else if ("fileMode".equals(fieldName)) {
fileMode = reader.getString();
} else {
reader.skipChildren();
}
}
FileProperties deserializedFileProperties = new FileProperties(lastModified, contentLength);
deserializedFileProperties.creationTime = creationTime;
deserializedFileProperties.contentType = contentType;
deserializedFileProperties.fileMode = fileMode;
return deserializedFileProperties;
});
} | class FileProperties implements JsonSerializable<FileProperties> {
/*
* The file creation time. The creation time is not returned for files on Linux Compute Nodes.
*/
@Generated
private OffsetDateTime creationTime;
/*
* The time at which the file was last modified.
*/
@Generated
private final OffsetDateTime lastModified;
/*
* The length of the file.
*/
@Generated
private final long contentLength;
/*
* The content type of the file.
*/
@Generated
private String contentType;
/*
* The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes.
*/
@Generated
private String fileMode;
/**
* Get the creationTime property: The file creation time. The creation time is not returned for files on Linux
* Compute Nodes.
*
* @return the creationTime value.
*/
@Generated
public OffsetDateTime getCreationTime() {
return this.creationTime;
}
/**
* Get the lastModified property: The time at which the file was last modified.
*
* @return the lastModified value.
*/
@Generated
public OffsetDateTime getLastModified() {
return this.lastModified;
}
/**
* Get the contentLength property: The length of the file.
*
* @return the contentLength value.
*/
@Generated
public long getContentLength() {
return this.contentLength;
}
/**
* Get the contentType property: The content type of the file.
*
* @return the contentType value.
*/
@Generated
public String getContentType() {
return this.contentType;
}
/**
* Get the fileMode property: The file mode attribute in octal format. The file mode is returned only for files on
* Linux Compute Nodes.
*
* @return the fileMode value.
*/
@Generated
public String getFileMode() {
return this.fileMode;
}
/**
* Creates an instance of FileProperties class.
*
* @param lastModified the lastModified value to set.
* @param contentLength the contentLength value to set.
*/
@Generated
private FileProperties(OffsetDateTime lastModified, long contentLength) {
this.lastModified = lastModified;
this.contentLength = contentLength;
}
/**
* {@inheritDoc}
*/
@Generated
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("lastModified",
this.lastModified == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModified));
jsonWriter.writeLongField("contentLength", this.contentLength);
jsonWriter.writeStringField("creationTime",
this.creationTime == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.creationTime));
jsonWriter.writeStringField("contentType", this.contentType);
jsonWriter.writeStringField("fileMode", this.fileMode);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of FileProperties from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of FileProperties if the JsonReader was pointing to an instance of it, or null if it was
* pointing to JSON null.
* @throws IllegalStateException If the deserialized JSON object was missing any required properties.
* @throws IOException If an error occurs while reading the FileProperties.
*/
} | class FileProperties implements JsonSerializable<FileProperties> {
/*
* The file creation time. The creation time is not returned for files on Linux Compute Nodes.
*/
@Generated
private OffsetDateTime creationTime;
/*
* The time at which the file was last modified.
*/
@Generated
private final OffsetDateTime lastModified;
/*
* The length of the file.
*/
@Generated
private final long contentLength;
/*
* The content type of the file.
*/
@Generated
private String contentType;
/*
* The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes.
*/
@Generated
private String fileMode;
/**
* Get the creationTime property: The file creation time. The creation time is not returned for files on Linux
* Compute Nodes.
*
* @return the creationTime value.
*/
@Generated
public OffsetDateTime getCreationTime() {
return this.creationTime;
}
/**
* Get the lastModified property: The time at which the file was last modified.
*
* @return the lastModified value.
*/
@Generated
public OffsetDateTime getLastModified() {
return this.lastModified;
}
/**
* Get the contentLength property: The length of the file.
*
* @return the contentLength value.
*/
@Generated
public long getContentLength() {
return this.contentLength;
}
/**
* Get the contentType property: The content type of the file.
*
* @return the contentType value.
*/
@Generated
public String getContentType() {
return this.contentType;
}
/**
* Get the fileMode property: The file mode attribute in octal format. The file mode is returned only for files on
* Linux Compute Nodes.
*
* @return the fileMode value.
*/
@Generated
public String getFileMode() {
return this.fileMode;
}
/**
* Creates an instance of FileProperties class.
*
* @param lastModified the lastModified value to set.
* @param contentLength the contentLength value to set.
*/
@Generated
private FileProperties(OffsetDateTime lastModified, long contentLength) {
this.lastModified = lastModified;
this.contentLength = contentLength;
}
/**
* {@inheritDoc}
*/
@Generated
@Override
public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
jsonWriter.writeStartObject();
jsonWriter.writeStringField("lastModified",
this.lastModified == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.lastModified));
jsonWriter.writeLongField("contentLength", this.contentLength);
jsonWriter.writeStringField("creationTime",
this.creationTime == null ? null : DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(this.creationTime));
jsonWriter.writeStringField("contentType", this.contentType);
jsonWriter.writeStringField("fileMode", this.fileMode);
return jsonWriter.writeEndObject();
}
/**
* Reads an instance of FileProperties from the JsonReader.
*
* @param jsonReader The JsonReader being read.
* @return An instance of FileProperties if the JsonReader was pointing to an instance of it, or null if it was
* pointing to JSON null.
* @throws IllegalStateException If the deserialized JSON object was missing any required properties.
* @throws IOException If an error occurs while reading the FileProperties.
*/
} |
This properties isn't set anywhere | AzureServiceBusJmsConnectionFactoryCustomizer amqpOpenPropertiesCustomizer(ObjectProvider<AzureServiceBusJmsCredentialSupplier> azureServiceBusJmsCredentialSupplier) {
return factory -> {
final Map<String, Object> properties = new HashMap<>();
if (azureServiceBusJmsCredentialSupplier.getIfAvailable() != null) {
properties.put("user-agent", AZURE_SPRING_PASSWORDLESS_SERVICE_BUS);
} else {
properties.put("user-agent", AZURE_SPRING_SERVICE_BUS);
}
};
} | final Map<String, Object> properties = new HashMap<>(); | AzureServiceBusJmsConnectionFactoryCustomizer amqpOpenPropertiesCustomizer(ObjectProvider<AzureServiceBusJmsCredentialSupplier> azureServiceBusJmsCredentialSupplier) {
return factory -> {
JmsConnectionFactory jmsFactory = (JmsConnectionFactory) ReflectionUtils.getField(ServiceBusJmsConnectionFactory.class, "factory", factory);
EnumMap<JmsConnectionExtensions, BiFunction<Connection, URI, Object>> extensionMap =
(EnumMap) ReflectionUtils.getField(JmsConnectionFactory.class, "extensionMap", jmsFactory);
if (extensionMap.containsKey(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES) ) {
Map<String, Object> properties = (Map) extensionMap.get(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES).apply(null, null);
if (properties.containsKey("com.microsoft:is-client-provider")) {
jmsFactory.setExtension(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES.toString(),
(connection, uri) -> {
properties.remove("com.microsoft:is-client-provider");
return properties;
});
}
}
};
} | class ServiceBusJmsAutoConfiguration {
@Bean
AzureServiceBusJmsProperties serviceBusJmsProperties(AzureGlobalProperties azureGlobalProperties) {
AzureServiceBusJmsProperties properties = new AzureServiceBusJmsProperties();
return mergeAzureProperties(azureGlobalProperties, properties);
}
@Bean
@ConditionalOnExpression("'premium'.equalsIgnoreCase('${spring.jms.servicebus.pricing-tier}')")
/**
* The BeanPostProcessor to instrument the {@link AzureServiceBusJmsProperties} bean with provided connection string
* providers.
* @param connectionStringProviders the connection string providers to provide the Service Bus connection string.
* @return the bean post processor.
*/
@Bean
@ConditionalOnMissingBean
@ConditionalOnMissingProperty(prefix = "spring.jms.servicebus", name = "connection-string")
static AzureServiceBusJmsPropertiesBeanPostProcessor azureServiceBusJmsPropertiesBeanPostProcessor(
ObjectProvider<ServiceConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) {
return new AzureServiceBusJmsPropertiesBeanPostProcessor(connectionStringProviders);
}
private AzureServiceBusJmsProperties mergeAzureProperties(AzureGlobalProperties azureGlobalProperties, AzureServiceBusJmsProperties azurePasswordlessProperties) {
AzureServiceBusJmsProperties mergedProperties = new AzureServiceBusJmsProperties();
AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties);
return mergedProperties;
}
} | class ServiceBusJmsAutoConfiguration {
@Bean
AzureServiceBusJmsProperties serviceBusJmsProperties(AzureGlobalProperties azureGlobalProperties) {
AzureServiceBusJmsProperties properties = new AzureServiceBusJmsProperties();
return mergeAzureProperties(azureGlobalProperties, properties);
}
@Bean
@ConditionalOnExpression("'standard'.equalsIgnoreCase('${spring.jms.servicebus.pricing-tier}')")
@SuppressWarnings("unchecked")
/**
* The BeanPostProcessor to instrument the {@link AzureServiceBusJmsProperties} bean with provided connection string
* providers.
* @param connectionStringProviders the connection string providers to provide the Service Bus connection string.
* @return the bean post processor.
*/
@Bean
@ConditionalOnMissingBean
@ConditionalOnMissingProperty(prefix = "spring.jms.servicebus", name = "connection-string")
static AzureServiceBusJmsPropertiesBeanPostProcessor azureServiceBusJmsPropertiesBeanPostProcessor(
ObjectProvider<ServiceConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) {
return new AzureServiceBusJmsPropertiesBeanPostProcessor(connectionStringProviders);
}
private AzureServiceBusJmsProperties mergeAzureProperties(AzureGlobalProperties azureGlobalProperties, AzureServiceBusJmsProperties azurePasswordlessProperties) {
AzureServiceBusJmsProperties mergedProperties = new AzureServiceBusJmsProperties();
AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties);
return mergedProperties;
}
} |
This is not correct, since the sdk has already set this property in their implementation. The only thing we can do right now is to ignore our useragent. | AzureServiceBusJmsConnectionFactoryCustomizer amqpOpenPropertiesCustomizer(ObjectProvider<AzureServiceBusJmsCredentialSupplier> azureServiceBusJmsCredentialSupplier) {
return factory -> {
final Map<String, Object> properties = new HashMap<>();
if (azureServiceBusJmsCredentialSupplier.getIfAvailable() != null) {
properties.put("user-agent", AZURE_SPRING_PASSWORDLESS_SERVICE_BUS);
} else {
properties.put("user-agent", AZURE_SPRING_SERVICE_BUS);
}
JmsConnectionFactory jmsFactory = (JmsConnectionFactory) ReflectionUtils.getField(ServiceBusJmsConnectionFactory.class, "factory", factory);
jmsFactory.setExtension(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES.toString(),
(connection, uri) -> properties);
};
} | jmsFactory.setExtension(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES.toString(), | AzureServiceBusJmsConnectionFactoryCustomizer amqpOpenPropertiesCustomizer(ObjectProvider<AzureServiceBusJmsCredentialSupplier> azureServiceBusJmsCredentialSupplier) {
return factory -> {
JmsConnectionFactory jmsFactory = (JmsConnectionFactory) ReflectionUtils.getField(ServiceBusJmsConnectionFactory.class, "factory", factory);
EnumMap<JmsConnectionExtensions, BiFunction<Connection, URI, Object>> extensionMap =
(EnumMap) ReflectionUtils.getField(JmsConnectionFactory.class, "extensionMap", jmsFactory);
if (extensionMap.containsKey(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES) ) {
Map<String, Object> properties = (Map) extensionMap.get(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES).apply(null, null);
if (properties.containsKey("com.microsoft:is-client-provider")) {
jmsFactory.setExtension(JmsConnectionExtensions.AMQP_OPEN_PROPERTIES.toString(),
(connection, uri) -> {
properties.remove("com.microsoft:is-client-provider");
return properties;
});
}
}
};
} | class ServiceBusJmsAutoConfiguration {
@Bean
AzureServiceBusJmsProperties serviceBusJmsProperties(AzureGlobalProperties azureGlobalProperties) {
AzureServiceBusJmsProperties properties = new AzureServiceBusJmsProperties();
return mergeAzureProperties(azureGlobalProperties, properties);
}
@Bean
@ConditionalOnExpression("'premium'.equalsIgnoreCase('${spring.jms.servicebus.pricing-tier}')")
/**
* The BeanPostProcessor to instrument the {@link AzureServiceBusJmsProperties} bean with provided connection string
* providers.
* @param connectionStringProviders the connection string providers to provide the Service Bus connection string.
* @return the bean post processor.
*/
@Bean
@ConditionalOnMissingBean
@ConditionalOnMissingProperty(prefix = "spring.jms.servicebus", name = "connection-string")
static AzureServiceBusJmsPropertiesBeanPostProcessor azureServiceBusJmsPropertiesBeanPostProcessor(
ObjectProvider<ServiceConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) {
return new AzureServiceBusJmsPropertiesBeanPostProcessor(connectionStringProviders);
}
private AzureServiceBusJmsProperties mergeAzureProperties(AzureGlobalProperties azureGlobalProperties, AzureServiceBusJmsProperties azurePasswordlessProperties) {
AzureServiceBusJmsProperties mergedProperties = new AzureServiceBusJmsProperties();
AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties);
return mergedProperties;
}
} | class ServiceBusJmsAutoConfiguration {
@Bean
AzureServiceBusJmsProperties serviceBusJmsProperties(AzureGlobalProperties azureGlobalProperties) {
AzureServiceBusJmsProperties properties = new AzureServiceBusJmsProperties();
return mergeAzureProperties(azureGlobalProperties, properties);
}
@Bean
@ConditionalOnExpression("'standard'.equalsIgnoreCase('${spring.jms.servicebus.pricing-tier}')")
@SuppressWarnings("unchecked")
/**
* The BeanPostProcessor to instrument the {@link AzureServiceBusJmsProperties} bean with provided connection string
* providers.
* @param connectionStringProviders the connection string providers to provide the Service Bus connection string.
* @return the bean post processor.
*/
@Bean
@ConditionalOnMissingBean
@ConditionalOnMissingProperty(prefix = "spring.jms.servicebus", name = "connection-string")
static AzureServiceBusJmsPropertiesBeanPostProcessor azureServiceBusJmsPropertiesBeanPostProcessor(
ObjectProvider<ServiceConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) {
return new AzureServiceBusJmsPropertiesBeanPostProcessor(connectionStringProviders);
}
private AzureServiceBusJmsProperties mergeAzureProperties(AzureGlobalProperties azureGlobalProperties, AzureServiceBusJmsProperties azurePasswordlessProperties) {
AzureServiceBusJmsProperties mergedProperties = new AzureServiceBusJmsProperties();
AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(azureGlobalProperties, azurePasswordlessProperties, mergedProperties);
return mergedProperties;
}
} |
It is better not to create resource nested here. Just have a line before `applicationInsightsManager` to do this. | public void testCreateComponent() {
ApplicationInsightsComponent component = null;
String randomPadding = randomPadding();
try {
String componentName = "component" + randomPadding;
String spaceName = "space" + randomPadding;
component = applicationInsightsManager.components()
.define(componentName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withKind("web")
.withApplicationType(ApplicationType.WEB)
.withWorkspaceResourceId(
logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new WorkspaceSku().withName(WorkspaceSkuNameEnum.PER_GB2018))
.withFeatures(new WorkspaceFeatures().withEnableLogAccessUsingOnlyResourcePermissions(true))
.withWorkspaceCapping(new WorkspaceCapping().withDailyQuotaGb(-1D))
.withRetentionInDays(30)
.withPublicNetworkAccessForIngestion(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.withPublicNetworkAccessForQuery(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.create()
.id()
)
.withIngestionMode(IngestionMode.LOG_ANALYTICS)
.withPublicNetworkAccessForIngestion(PublicNetworkAccessType.ENABLED)
.withPublicNetworkAccessForQuery(PublicNetworkAccessType.ENABLED)
.create();
component.refresh();
Assertions.assertEquals(component.name(), componentName);
Assertions.assertEquals(component.name(), applicationInsightsManager.components().getById(component.id()).name());
Assertions.assertTrue(applicationInsightsManager.components().list().stream().findAny().isPresent());
} finally {
if (component != null) {
applicationInsightsManager.components().deleteById(component.id());
}
}
} | .id() | public void testCreateComponent() {
ApplicationInsightsComponent component = null;
String randomPadding = randomPadding();
try {
String componentName = "component" + randomPadding;
String spaceName = "space" + randomPadding;
Workspace workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.create();
component = applicationInsightsManager.components()
.define(componentName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withKind("web")
.withApplicationType(ApplicationType.WEB)
.withWorkspaceResourceId(workspace.id())
.withIngestionMode(IngestionMode.LOG_ANALYTICS)
.create();
component.refresh();
Assertions.assertEquals(component.name(), componentName);
Assertions.assertEquals(component.name(), applicationInsightsManager.components().getById(component.id()).name());
Assertions.assertTrue(applicationInsightsManager.components().list().stream().findAny().isPresent());
} finally {
if (component != null) {
applicationInsightsManager.components().deleteById(component.id());
}
}
} | class ApplicationInsightsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ApplicationInsightsManager applicationInsightsManager;
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
applicationInsightsManager = ApplicationInsightsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
// NOTE(review): the leading "} |" on the next line is a dataset-extraction artifact,
// not valid Java — confirm against the original source file.
} | class ApplicationInsightsManagerTests extends TestBase {
// Random source for unique resource-name suffixes.
private static final Random RANDOM = new Random();
// All test resources are provisioned in this region.
private static final Region REGION = Region.US_EAST;
// Resource group for this run; replaced by AZURE_RESOURCE_GROUP_NAME when configured.
private String resourceGroupName = "rg" + randomPadding();
private ApplicationInsightsManager applicationInsightsManager;
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
// True when an externally supplied resource group is in use (skip create/delete).
private boolean testEnv;
@Override
public void beforeTest() {
// Authenticate every manager with the default Azure credential chain.
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
applicationInsightsManager = ApplicationInsightsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
// Reuse a caller-provided resource group when configured; otherwise create one.
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
// Delete only resource groups this test created itself.
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
// NOTE(review): @Test/@DoNotRecord on a private static helper appears to be an
// extraction artifact (JUnit test methods must not be private or static) — confirm
// against the original source; the real test body seems to have been elided.
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
}
// Completes client initialization after construction: wires the gateway store model,
// creates (or warms, from a snapshot) the collection and partition-key-range caches,
// starts telemetry, and selects gateway vs. direct connectivity. Closes the client on
// any failure so no partially-initialized resources leak.
// NOTE(review): the "Downgrades ... |" prefix fused onto the signature line below is a
// dataset-extraction artifact — confirm against the original source.
Downgrades (requested consistency less than account default is only applicable for reads/queries) | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
// Allow callers (e.g. fault-injection tests) to wrap or replace the HTTP client.
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// Warm the collection cache from the serialized snapshot when one is supplied.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
// Gateway mode talks HTTP through the proxy; direct mode builds the RNTBD stack.
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// Session capturing is only needed when the effective level is SESSION,
// unless explicitly forced on via the override flag.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
// Fail fast: release any partially initialized resources before rethrowing.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
// Completes client initialization: gateway proxy, metadata caches, telemetry, and
// gateway-vs-direct store model selection; closes the client on any failure.
// NOTE(review): the "} | ConsistencyLevel ... |" prefix fused onto the signature line
// below is a dataset-extraction artifact — confirm against the original source.
} | ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
// Optional hook so callers can wrap/replace the reactor HTTP client.
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// A metadata snapshot, when present, pre-populates the collection cache.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// Session capturing matters only for an effective SESSION level (or when forced on).
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
// Public constructor overload without a TokenCredential: delegates to the shared
// permission-feed constructor (passing null for the credential) and then installs
// the optional CosmosAuthorizationTokenResolver.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Applied after delegation so the resolver overrides whatever the base ctor set up.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
// Public constructor overload with a TokenCredential (AAD auth): delegates to the
// shared permission-feed constructor and then installs the optional
// CosmosAuthorizationTokenResolver.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Applied after delegation so the resolver overrides whatever the base ctor set up.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
// Private constructor handling a permission feed: after delegating the core setup,
// it builds resourceTokensMap (resource id/name -> list of (partition key, token)
// pairs) so per-resource tokens can be attached to requests later.
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
// A permission must point at a concrete resource path.
if (segments.length == 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
// Group tokens per resource id/full name; create the bucket on first sight.
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
// A non-empty feed that produced no usable tokens is a caller error.
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
// Remember the first real resource token as the default fallback token.
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
// Core (package-private) constructor: registers the client instance, resolves the
// authorization scheme (master key / resource token / AAD), applies the connection
// policy, and creates the HTTP client, endpoint manager and retry policy. Heavy
// network-facing initialization is deferred to init().
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Bookkeeping for diagnostics: process-wide client count and per-endpoint map.
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
// Default correlation id is the zero-padded client id when none is supplied.
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Resolve auth in priority order: explicit key credential, resource token,
// master key string, then AAD token credential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): the scope string below is truncated mid-literal — a
// dataset-extraction artifact; confirm against the original source.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
// Fall back to a default direct-mode connection policy when none is given.
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session tokens are only captured for SESSION consistency unless overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Release whatever was acquired (client counters, monitors) before rethrowing.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
@Override
public DiagnosticsClientConfig getConfig() {
// Diagnostics configuration assembled during client construction.
return diagnosticsClientConfig;
}
@Override
public CosmosDiagnostics createDiagnostics() {
// New diagnostics instance using the client-level telemetry sampling rate.
return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
// Reads the account-level service configuration after endpoint discovery and decides
// whether multi-region writes can be used (requires both the policy flag and the
// account capability).
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
// A null account means endpoint discovery never succeeded (bad endpoint or auth).
if (databaseAccount == null) {
// NOTE(review): the string literals below are truncated at "https:" — a
// dataset-extraction artifact; confirm the full messages in the original source.
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
private void updateGatewayProxy() {
    // Push the freshly built metadata caches and account-level settings into the
    // gateway store model (created earlier in init()).
    this.gatewayProxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    this.gatewayProxy.setCollectionCache(this.collectionCache);
    this.gatewayProxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    this.gatewayProxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes the collection metadata cache into the given snapshot so a future
// client can be constructed with a warm cache.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
// Builds the direct-mode (TCP/RNTBD) stack: the global address resolver, the store
// client factory, and finally the server store model. Order matters — the factory
// needs the resolver, and createStoreModel needs the factory.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
// Adapter exposing this client as a DatabaseAccountManagerInternal for the global
// endpoint manager; each call simply delegates back to the enclosing client.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; package-private so tests can override it to
// inject a fake gateway.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
private HttpClient httpClient() {
    // Gateway HTTP client settings are derived from the connection policy.
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: record its configuration in the diagnostics config.
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }
    // Sharing enabled: reuse one HTTP client across all clients in this process.
    return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
}
// Creates the direct-mode server store model on top of a freshly built store client.
// NOTE(review): the subscribeRntbdStatus parameter is unused in this body — confirm
// whether RNTBD status subscription was dropped intentionally.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
@Override
public URI getServiceEndpoint() {
// Account endpoint this client was constructed against.
return this.serviceEndpoint;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
// Effective policy (defaulted in the constructor when the caller passed null).
return this.connectionPolicy;
}
@Override
public boolean isContentResponseOnWriteEnabled() {
// Whether write responses carry the resource payload back to the caller.
return contentResponseOnWriteEnabled;
}
@Override
public ConsistencyLevel getConsistencyLevel() {
// Client-level consistency override; null means the account default applies.
return consistencyLevel;
}
@Override
public ClientTelemetry getClientTelemetry() {
// Telemetry pipeline; populated during init().
return this.clientTelemetry;
}
@Override
public String getClientCorrelationId() {
// Caller-supplied correlation id, or the zero-padded client id by default.
return this.clientCorrelationId;
}
@Override
public String getMachineId() {
    // The machine id lives on the diagnostics client config; no config => null.
    DiagnosticsClientConfig config = this.diagnosticsClientConfig;
    return config == null ? null : config.getMachineId();
}
@Override
public String getUserAgent() {
// Full user-agent string, including any suffix from the connection policy.
return this.userAgentContainer.getUserAgent();
}
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    // Each operation gets a fresh retry-policy instance so retry state is not shared;
    // retries re-enter createDatabaseInternal through the deferred supplier.
    final DocumentClientRetryPolicy perOpRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, perOpRetryPolicy),
        perOpRetryPolicy);
}
// Builds and issues the Create-Database request, recording how long the payload
// serialization took in the request's diagnostics. Argument errors surface as a
// failed Mono rather than a thrown exception.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Time the JSON serialization so it can be reported in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
// Let the retry policy capture per-request state before the first send.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    // Fresh retry policy per operation; retries re-enter deleteDatabaseInternal.
    final DocumentClientRetryPolicy perOpRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, perOpRetryPolicy),
        perOpRetryPolicy);
}
// Builds and issues the Delete-Database request; argument errors surface as a
// failed Mono rather than a thrown exception.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
// Let the retry policy capture per-request state before the first send.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    // Fresh retry policy per operation; retries re-enter readDatabaseInternal.
    final DocumentClientRetryPolicy perOpRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, perOpRetryPolicy),
        perOpRetryPolicy);
}
// Builds and issues the Read-Database request; argument errors surface as a failed
// Mono rather than a thrown exception.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
// Let the retry policy capture per-request state before the first send.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
// Feed-read of all databases under the account root.
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
// Maps a parent resource link plus target resource type to the feed/query link for
// that type. Databases and Offers are account-rooted; everything else is a child
// path segment appended to the parent link.
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
case ClientEncryptionKey:
return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
case Conflict:
return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
default:
// Unsupported types are a programming error at the call site.
throw new IllegalArgumentException("resource type not supported");
}
}
// Returns the operation context/listener attached to the query options, or null when no options exist.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
// Returns the operation context/listener carried by the request options, or null when no options exist.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload: runs the query using this client itself as the diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    return this.createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
/**
 * Core query entry point: resolves the query feed link, fixes the correlation activity id,
 * wires an InvalidPartitionException retry policy and a scoped diagnostics factory whose
 * snapshots are merged back into the operation state on emission, error and cancellation.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
    // Prefer a caller-provided correlation activity id; otherwise generate one for this query.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();
    // Flag observed by the timeout handling below so downstream can tell cancellation from failure.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
    // Retries when the target partition has gone away (e.g. after a split/merge).
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
    // Scoped factory so per-attempt diagnostics can be reset/merged by the operation state.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);
    return
        ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy
        ).flatMap(result -> {
            // Merge diagnostics on every successful page...
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            // ...as well as on error...
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        // ...and when the subscriber cancels, so no diagnostics are lost.
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
/**
 * Builds the query execution context (pipelined when a query plan is involved) and maps its
 * pages, attaching query-plan info to the first response and select-value info to every page.
 * When an end-to-end latency policy is enabled the page flux is wrapped with a timeout.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
    // Query-plan diagnostics are only attached to the very first page.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // compareAndSet guarantees exactly one page receives the plan diagnostics.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
        }
        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Aggregates the diagnostics of all requests cancelled by an end-to-end timeout and
 * attaches the merged result to the given exception, so callers see the full picture
 * of what happened before the operation was cancelled.
 *
 * @param requestOptions query options carrying the cancelled-request diagnostics tracker
 * @param exception the timeout/cancellation exception to decorate
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {
    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);
    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregratedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);
                    // BUGFIX: previously fetched from 'first' twice, so the statistics of
                    // 'toBeMerged' were silently dropped from the aggregate.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);
                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get();
        BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
    }
}
/**
 * Wraps the page flux with the end-to-end operation timeout. On timeout the reactor
 * TimeoutException is mapped to a CosmosException (a dedicated negative-timeout exception
 * when the configured duration is negative, OperationCancelledException otherwise),
 * the cancellation flag is set and merged diagnostics are attached to the exception.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        // NOTE(review): a negative duration presumably makes Flux.timeout fire immediately,
        // which is why a distinct "negative timeout" exception is surfaced here — confirm.
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                    // Preserve the original stack trace for diagnosability.
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
                    return cancellationException;
                }
                return throwable;
            });
    }
    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                CosmosException exception = new OperationCancelledException();
                exception.setStackTrace(throwable.getStackTrace());
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnostics(requestOptions, exception);
                return exception;
            }
            return throwable;
        });
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    return this.queryDatabases(new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Database queries are always rooted at /dbs.
    return this.createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    // A fresh retry policy per operation; it resets the session token on relevant failures.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates input, serializes the collection (recording serialization diagnostics),
 * issues the Create request and records the returned session token.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Measure serialization time for the diagnostics context.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Capture the session token of the newly created collection for session consistency.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    // A fresh retry policy per operation; it resets the session token on relevant failures.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Validates and serializes the collection (recording serialization diagnostics),
 * issues the Replace request against the collection's self-link and records the
 * session token when a resource body is returned.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Measure serialization time for the diagnostics context.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Replace may return no body (content-response disabled); only then skip the token update.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    // A fresh retry policy per operation; it resets the session token on relevant failures.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues a Delete request for the collection identified by its link.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Populates DELETE headers, updates the retry context end-time on retried attempts,
// then dispatches the request through the resolved store proxy.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            // On a retry (retryCount > 0) close out the previous attempt's timing window.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Populates POST headers for the delete-by-partition-key operation, updates retry timing
// on retried attempts, then dispatches through the resolved store proxy.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            // On a retry (retryCount > 0) close out the previous attempt's timing window.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Populates GET headers, updates retry timing on retried attempts, then dispatches
// through the resolved store proxy.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            // On a retry (retryCount > 0) close out the previous attempt's timing window.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
// Populates GET headers for a feed read and dispatches via the resolved store proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> this.getStoreProxy(populatedRequest).processMessage(populatedRequest));
}
// Populates POST headers for a query, dispatches via the store proxy and captures the
// response's session token before handing the response back.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                .map(response -> {
                    // Record the session token so subsequent session-consistent reads see this write/read.
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            ));
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    // A fresh retry policy per operation; it resets the session token on relevant failures.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues a Read request for the collection identified by its link.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Collections are a non-document resource; read the "/colls" feed under the database.
    String collectionsLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return this.nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsLink);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    return this.queryCollections(databaseLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Collection queries run under the given database link.
    return this.createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Serializes each stored-procedure parameter to JSON and joins them into a JSON array literal.
private static String serializeProcedureParams(List<Object> objectArray) {
    String[] serialized = new String[objectArray.size()];
    int index = 0;
    for (Object param : objectArray) {
        if (param instanceof JsonSerializable) {
            // SDK model types carry their own JSON representation.
            serialized[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serialized[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
        ++index;
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Assembles the HTTP request headers for an operation: client-level defaults
 * (multi-write, consistency, content-response preference) plus everything carried
 * in the RequestOptions (etags, triggers, session token, offer/throughput settings,
 * quota/script-logging flags, dedicated-gateway options).
 * Custom headers from the options are applied first, so later explicit settings win.
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only apply the client-level content-response preference.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response setting overrides the client-level default.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Explicit offer throughput takes precedence over the legacy offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed (manual) throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
// Exposes the retry-policy factory that resets the session token on relevant failures;
// used to build a fresh per-request retry policy for each operation.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection from the cache and stamps partition-key information
// onto the request before returning it.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}
// Stamps partition-key information onto the request once the already-in-flight
// collection resolution completes, then passes the request through.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(resolvedCollection -> {
        this.addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection.v);
        return request;
    });
}
/**
 * Determines the effective partition key for the request — from explicit options,
 * from the collection definition (empty when non-partitioned), or by extracting it
 * from the document body — and sets it on the request plus its x-ms header.
 * Extraction from the body is timed and recorded as serialization diagnostics.
 *
 * @throws UnsupportedOperationException when no partition key can be determined
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Explicit PartitionKey.NONE maps to the collection's "none" key representation.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Migration/legacy path: collection has no partition-key definition.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the key from the document body, preferring an already-parsed representation.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // rewind() because the buffer may already have been read for serialization.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds a document write request: serializes the document (recording serialization
 * diagnostics), applies write-retry/exclude-region options, notifies the retry policy,
 * and finally resolves the collection to stamp partition-key information.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType,
                                                                DiagnosticsClientContext clientContextOverride) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Measure serialization time for the diagnostics context.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    // Opt-in retries for non-idempotent writes only when the caller enabled them.
    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds a transactional-batch request: wraps the pre-serialized batch body
 * (recording serialization diagnostics), applies exclude-region options, notifies
 * the retry policy, then resolves the collection to stamp batch headers.
 * (A redundant duplicate setExcludeRegions call was removed — the same value was
 * previously applied twice.)
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // Measure serialization time for the diagnostics context; the batch body is pre-serialized.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    // Route the batch either by a concrete partition key value or by a partition
    // key range id, depending on the concrete batch request type.
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey =
            ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            // PartitionKey.NONE is translated using the collection's PK definition.
            partitionKeyInternal =
                ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey());
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(
            HttpConstants.HttpHeaders.PARTITION_KEY,
            Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        String pkRangeId =
            ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(pkRangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    // Common batch markers: batch flag, atomicity and continue-on-error semantics.
    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC,
        String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR,
        String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * Populates the common request headers: date, authorization, api-type,
 * SDK capabilities, content-type/accept defaults and (when applicable)
 * feed-range filtering headers.
 *
 * NOTE: lazy — the caller must subscribe to the returned Mono for the
 * headers to actually be populated on the request.
 *
 * @param request request to populate headers to
 * @param httpMethod http method used to compute the authorization token
 * @return Mono which, on subscription, populates the headers and emits the request
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());

    boolean hasAuthSource = this.masterKeyOrResourceToken != null
        || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null
        || this.credential != null;
    if (hasAuthSource) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, headers,
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is mandated by the JLS; this path is effectively unreachable.
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        headers.put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }

    if (this.apiType != null) {
        headers.put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }

    this.populateCapabilitiesHeader(request);

    // Default content type for bodied verbs when the caller did not set one.
    boolean isPostOrPut = RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod);
    if (isPostOrPut && !headers.containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        headers.put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod)
            && !headers.containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        headers.put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!headers.containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        headers.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }

    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

    if (this.requiresFeedRangeFiltering(request)) {
        // Feed-range headers need the collection metadata, which is resolved async.
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }

    return this.populateAuthorizationHeader(request);
}
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    // Advertise the SDK's supported capabilities unless the header was already set.
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    // Feed-range filtering only applies to document/conflict feed reads and queries
    // that actually carry a feed range.
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery = operationType == OperationType.ReadFeed
        || operationType == OperationType.Query
        || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Only AAD-token auth needs an async token fetch here; for the other auth
    // modes the authorization header has already been computed synchronously.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    // Non-AAD modes: nothing to add here, the headers pass through untouched.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    // Auth mode this client instance was configured with (master key, resource token, AAD, ...).
    return this.authorizationTokenType;
}
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    // Auth source precedence: custom token resolver > credential (key signature)
    // > raw resource token > per-resource token map.
    if (this.cosmosAuthorizationTokenResolver != null) {
        Map<String, Object> readOnlyProperties =
            properties != null ? Collections.unmodifiableMap(properties) : null;
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(
            requestVerb.toUpperCase(),
            resourceName,
            this.resolveCosmosResourceType(resourceType).toString(),
            readOnlyProperties);
    }
    if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(
            requestVerb, resourceName, resourceType, headers);
    }
    if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used as-is.
        return masterKeyOrResourceToken;
    }
    assert resourceTokensMap != null;
    if (resourceType.equals(ResourceType.DatabaseAccount)) {
        // Account reads fall back to the first token from the permission feed.
        return this.firstResourceTokenFromPermissionFeed;
    }
    return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(
        resourceTokensMap, requestVerb, resourceName, headers);
}
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    // Fall back to SYSTEM when the service-serialized type has no public mapping.
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token from the response into the session container so
// subsequent requests on this client can honor session consistency.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    // POST with lazily-populated headers, dispatched to the appropriate store proxy.
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeModel = this.getStoreProxy(populatedRequest);
            // On a retry attempt, close out the retry-context timing window.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeModel.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            // The service treats a POST with this header as an upsert.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            // On a retry attempt, close out the retry-context timing window.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(response -> {
                    // Capture the session token so session-consistent reads see this write.
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // PUT with lazily-populated headers.
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            // On a retry attempt, close out the retry-context timing window.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // PATCH with lazily-populated headers.
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            // On a retry attempt, close out the retry-context timing window.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    // Point-create wrapped with the availability strategy.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (effectiveOptions, e2eConfig, clientContextOverride) -> createDocumentCore(
            collectionLink,
            document,
            effectiveOptions,
            disableAutomaticIdGeneration,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    // Without an explicit partition key, wrap with the partition-key-mismatch
    // retry policy so stale partition-key-definition caches get refreshed.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestMono = getCreateDocumentRequest(
            requestRetryPolicy, collectionLink, document, options,
            disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
        // Apply the end-to-end timeout around the actual service call.
        return requestMono
            .flatMap(request -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                request,
                endToEndPolicyConfig,
                create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
    RxDocumentServiceRequest request,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono) {

    // Without an enabled end-to-end policy the pipeline is returned untouched.
    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        return rxDocumentServiceResponseMono;
    }
    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        // A negative timeout is rejected eagerly with a dedicated sub-status.
        return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
    }
    request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
    // Reactor timeouts are mapped to OperationCancelledException with diagnostics.
    return rxDocumentServiceResponseMono
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> getCancellationException(request, throwable));
}
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
    // Translate a reactor timeout into OperationCancelledException, keeping the
    // original stack trace and attaching the request diagnostics when available.
    Throwable unwrapped = reactor.core.Exceptions.unwrap(throwable);
    if (!(unwrapped instanceof TimeoutException)) {
        return throwable;
    }
    if (request.requestContext == null) {
        // Without a request context there are no diagnostics to attach.
        return throwable;
    }
    CosmosException cancellation = new OperationCancelledException();
    cancellation.setStackTrace(throwable.getStackTrace());
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");

    String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
    CosmosException exception = new OperationCancelledException(message, null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);

    boolean hasRequestContext = request != null && request.requestContext != null;
    if (!hasRequestContext) {
        return exception;
    }
    // Mark the request cancelled and attach its diagnostics to the exception.
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    // Point-upsert wrapped with the availability strategy.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (effectiveOptions, e2eConfig, clientContextOverride) -> upsertDocumentCore(
            collectionLink, document, effectiveOptions, disableAutomaticIdGeneration, e2eConfig, clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    // Without an explicit partition key, wrap with the partition-key-mismatch
    // retry policy so stale partition-key-definition caches get refreshed.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestMono = getCreateDocumentRequest(
            retryPolicyInstance, collectionLink, document, options,
            disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
        // Apply the end-to-end timeout around the actual service call.
        return requestMono
            .flatMap(request -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                request,
                endToEndPolicyConfig,
                upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    // Point-replace by document link, wrapped with the availability strategy.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContextOverride) -> replaceDocumentCore(
            documentLink,
            document,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // Derive the collection link from the document link for the mismatch retry policy.
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            documentLink,
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Validates the replace arguments, converts the raw payload to a typed
 * {@link Document} and delegates to the typed replace path.
 *
 * @param documentLink link of the document to replace; must be non-empty
 * @param document the replacement payload; must be non-null
 * @return Mono emitting the replaced document's resource response, or an error
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Pass the exception as the last arg so the stack trace is logged
        // (consistent with createDocumentInternal).
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    // Point-replace by Document instance (self link), wrapped with the availability strategy.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContextOverride) -> replaceDocumentCore(
            document,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // NOTE(review): the document's self link is passed as the collection link
        // to the mismatch retry policy — confirm this is intentional.
        String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Replaces a document using its self link; delegates to the link-based
 * replace path.
 *
 * @param document the replacement document; must be non-null
 * @return Mono emitting the replaced document's resource response, or an error
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Fixed copy-paste defect: the message said "replacing a database" in this
        // document-replace path; also pass 'e' so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core replace path: serializes the document, builds the Replace request with
 * serialization diagnostics, resolves the collection to attach partition key
 * information and issues the PUT, applying the end-to-end timeout policy.
 *
 * @param documentLink link of the document being replaced
 * @param document the typed replacement document; must be non-null
 * @param options request options; may carry tracking id, exclude regions,
 *                non-idempotent-retry flag; may be null
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @param endToEndPolicyConfig optional end-to-end latency policy
 * @param clientContextOverride diagnostics client context override
 * @return Mono emitting the replaced document's resource response
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Measure serialization duration for the diagnostics context.
Instant serializationStartTimeUTC = Instant.now();
if (options != null) {
// A tracking id from the options is stamped onto the document payload itself.
String trackingId = options.getTrackingId();
if (trackingId != null && !trackingId.isEmpty()) {
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy capture per-request state before the request goes out.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so partition key information can be attached to the request.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
return requestObs.flatMap(req -> {
// NOTE(review): 'request' (not 'req') is used below; this is only equivalent if
// addPartitionKeyInformation emits the same request instance — confirm.
Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
});
}
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    // Request-level end-to-end policy wins over the client-level default.
    if (options != null && options.getCosmosEndToEndLatencyPolicyConfig() != null) {
        return options.getCosmosEndToEndLatencyPolicyConfig();
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    // Point-patch wrapped with the availability strategy.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (effectiveOptions, e2eConfig, clientContextOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(
            documentLink,
            cosmosPatchOperations,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Core patch path: serializes the patch operations, builds the Patch request
 * with serialization diagnostics, resolves the collection to attach partition
 * key routing and issues the PATCH, applying the end-to-end timeout policy.
 *
 * @param documentLink link of the document being patched; must be non-empty
 * @param cosmosPatchOperations patch operations to apply; must be non-null
 * @param options request options (exclude regions, non-idempotent-retry flag,
 *                partition key); may be null
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @param endToEndPolicyConfig optional end-to-end latency policy
 * @param clientContextOverride diagnostics client context override
 * @return Mono emitting the patched document's resource response
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Measure patch-body serialization duration for the diagnostics context.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy capture per-request state before the request goes out.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection; partition key routing comes from the options (no document body).
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
// NOTE(review): 'request' (not 'req') is used below; this is only equivalent if
// addPartitionKeyInformation emits the same request instance — confirm.
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Point-delete without a known document payload; wrapped with the availability strategy.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            null,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // Point-delete with the document payload available for partition key extraction.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Core delete path: builds the Delete request, resolves the collection to
 * attach partition key information (from the options or the supplied document
 * payload) and issues the delete, applying the end-to-end timeout policy.
 *
 * @param documentLink link of the document to delete; must be non-empty
 * @param internalObjectNode optional document payload used for partition key
 *                           extraction; may be null
 * @param options request options; may be null
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null
 * @param endToEndPolicyConfig optional end-to-end latency policy
 * @param clientContextOverride diagnostics client context override
 * @return Mono emitting the deleted document's resource response, or an error
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy capture per-request state before the request goes out.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// Resolve the collection so partition key information can be attached.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request, null, internalObjectNode, options, collectionObs);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(req -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
// Deletes all documents sharing a partition key via the server-side
// PartitionKey-delete operation.
// NOTE(review): the 'partitionKey' parameter is not forwarded; the internal
// path derives the partition key from 'options' — confirm callers always set
// it there before invoking this method.
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
requestRetryPolicy);
}
// Builds and dispatches the service request that deletes every item in one logical partition.
// The partition key itself is attached later via addPartitionKeyInformation (resolved from the
// request options against the collection's partition-key definition). Synchronous failures are
// converted to Mono.error so callers always observe errors through the reactive chain.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
// ResourceType.PartitionKey: the target of this delete is the whole logical partition,
// not an individual document.
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
// Let the retry policy capture per-request state before the first attempt.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// Resolve the collection first — addPartitionKeyInformation needs its PK definition.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    // Delegate to the overload that accepts a diagnostics client context; this client is the context.
    return this.readDocument(documentLink, options, this);
}
// Routes a point read through the availability-strategy wrapper so cross-region
// speculation can be applied when configured on the request options.
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (effectiveOptions, e2eConfig, ctxOverride) ->
            readDocumentCore(documentLink, effectiveOptions, e2eConfig, ctxOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
// One read attempt (possibly one of several speculative regional attempts); each attempt
// gets its own retry-policy instance bound to the effective client context.
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
// Builds and dispatches the service request for a single-document read, applying the
// end-to-end latency policy timeout around the wire call. Synchronous failures are
// surfaced as Mono.error.
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // BUGFIX: guard against null options before dereferencing. Sibling methods (e.g. the
        // document delete path) perform this null check; a read issued with null RequestOptions
        // previously threw a NullPointerException here instead of executing.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        // Resolve the collection so partition-key information can be attached to the request.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            // 'req' is the same request instance, now enriched with partition-key information;
            // use it (not the outer 'request') for consistency with the other operation paths.
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(req, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(req, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    // A full read-feed over the collection is implemented as a SELECT * query.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return this.queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
// Reads a batch of items identified by (id, partitionKey) pairs as one logical operation.
// Strategy: items that map alone onto a physical partition range are fetched with point reads;
// ranges holding multiple requested items are served by a generated query (see getRangeQueryMap).
// Both result streams are merged into one FeedResponse with aggregated diagnostics, query
// metrics and request charge. A scoped diagnostics factory captures per-request diagnostics
// and is merged back into the caller's operation state on completion or error.
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
// Look up the routing map so each item can be bucketed by its physical partition range.
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Bucket every requested identity under the partition range that owns its
// effective partition key; reject multi-hash keys with missing components.
itemIdentityList
.forEach(itemIdentity -> {
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Multi-item ranges get a generated query; single-item ranges are point-read.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
// Merge both paths, then fold all pages into one synthetic FeedResponse.
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
// Record the whole readMany as a single successful (200) operation in the
// diagnostics context before handing the merged response to the caller.
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
// On failure, still record the operation (with the error's status/charge) so the
// diagnostics context reflects the failed readMany; non-Cosmos errors pass through.
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
// Builds one SqlQuerySpec per partition range that holds more than one requested item.
// Single-item ranges are intentionally omitted — those are served via point reads.
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        final List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue;
        }
        final SqlQuerySpec spec;
        if (pkSelector.equals("[\"id\"]")) {
            // Partition key is /id — a plain IN-clause on id suffices.
            spec = createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            spec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            spec = createReadManyQuerySpec(identities, pkSelector);
        }
        queriesByRange.put(entry.getKey(), spec);
    }
    return queriesByRange;
}
// Generates "SELECT * FROM c WHERE c.id IN ( @param0, @param1, ... )" for the case where
// the partition key path is /id, so matching on id alone is sufficient.
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    int index = 0;
    for (CosmosItemIdentity itemIdentity : idPartitionKeyPairList) {
        final String paramName = "@param" + index;
        parameters.add(new SqlParameter(paramName, itemIdentity.getId()));
        if (index > 0) {
            query.append(", ");
        }
        query.append(paramName);
        index++;
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
// Generates a disjunction of "(c.id = @idParam AND c[pk] = @pkParam)" terms, one per item,
// parameterized to avoid injection. Parameter names alternate: even index = pk, odd = id.
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    final int count = itemIdentities.size();
    for (int i = 0; i < count; i++) {
        final CosmosItemIdentity identity = itemIdentities.get(i);
        final String pkParamName = "@param" + (2 * i);
        final String idParamName = "@param" + (2 * i + 1);
        final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
        if (i < count - 1) {
            query.append(" OR ");
        }
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
// Generates the read-many query for a MULTI_HASH (hierarchical) partition key: each item
// contributes a "(c.id = @idN AND c.pk1 = @a AND c.pk2 = @b ...)" disjunct with one
// parameter per sub-partition-key component. @param numbering is a single running counter
// shared by pk components and ids, so the parameter list order must not be disturbed.
private SqlQuerySpec createReadManyQuerySpecMultiHash(
List<CosmosItemIdentity> itemIdentities,
PartitionKeyDefinition partitionKeyDefinition) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
int paramCount = 0;
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
// NOTE(review): relies on the multi-hash partition-key value being a String whose
// components are joined by '=' — confirm against PartitionKey's serialization.
String pkValueString = (String) pkValue;
List<List<String>> partitionKeyParams = new ArrayList<>();
List<String> paths = partitionKeyDefinition.getPaths();
int pathCount = 0;
// Pair each sub-key value with its definition path ("/tenant", "/user", ...).
for (String subPartitionKey: pkValueString.split("=")) {
String pkParamName = "@param" + paramCount;
partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
parameters.add(new SqlParameter(pkParamName, subPartitionKey));
paramCount++;
pathCount++;
}
String idValue = itemIdentity.getId();
String idParamName = "@param" + paramCount;
paramCount++;
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
for (List<String> pkParam: partitionKeyParams) {
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c.");
// substring(1) strips the leading '/' from the partition-key path.
queryStringBuilder.append(pkParam.get(0).substring(1));
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParam.get(1));
}
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
// Renders the partition-key definition as a concatenated bracket selector, e.g.
// /a + /b -> ["a"]["b"], stripping each path's leading '/' and replacing embedded
// double-quotes the same way the original stream pipeline did.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    final StringBuilder selector = new StringBuilder();
    for (String pathPart : partitionKeyDefinition.getPaths()) {
        final String withoutSlash = StringUtils.substring(pathPart, 1);
        final String escaped = StringUtils.replace(withoutSlash, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
// Executes the generated per-range queries for the multi-item buckets of a readMany call.
// Returns an empty Flux when every requested item is already served by a point read.
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }
    final UUID correlationActivityId = randomUuid();
    final AtomicBoolean cancelledOnTimeout = new AtomicBoolean(false);
    final IDocumentQueryClient queryClient =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContexts =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            correlationActivityId,
            klass,
            resourceTypeEnum,
            cancelledOnTimeout);
    return executionContexts.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
// Issues a point read for every partition range that holds exactly one requested item and
// adapts each result (or a tolerated 404) into a single-item FeedResponse so it can be
// merged with the query results of readMany. 404/UNKNOWN is treated as "item absent"
// (empty page) rather than an error; all other failures propagate.
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
String resourceLink,
CosmosQueryRequestOptions queryRequestOptions,
Class<T> klass) {
return Flux.fromIterable(singleItemPartitionRequestMap.values())
.flatMap(cosmosItemIdentityList -> {
// Only single-item buckets take the point-read path; multi-item buckets are
// handled by queryForReadMany and are skipped here.
if (cosmosItemIdentityList.size() == 1) {
CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
RequestOptions requestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(queryRequestOptions);
requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
// The pair tunnels either a successful response (left) or a tolerated
// not-found exception (right) downstream.
return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
.flatMap(resourceResponse -> Mono.just(
new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
))
.onErrorResume(throwable -> {
Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
if (unwrappedThrowable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) unwrappedThrowable;
int statusCode = cosmosException.getStatusCode();
int subStatusCode = cosmosException.getSubStatusCode();
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
}
}
return Mono.error(unwrappedThrowable);
});
}
return Mono.empty();
})
.flatMap(resourceResponseToExceptionPair -> {
ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
FeedResponse<Document> feedResponse;
if (cosmosException != null) {
// Not-found: emit an empty page but keep the request statistics so the
// aggregated diagnostics still account for this call.
feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
} else {
CosmosItemResponse<T> cosmosItemResponse =
ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
feedResponse = ModelBridgeInternal.createFeedResponse(
Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
cosmosItemResponse.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
}
return Mono.just(feedResponse);
});
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    // Wrap the raw query text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryDocuments(collectionLink, querySpec, state, classOfT);
}
// Adapts this client into the IDocumentQueryClient interface consumed by the query pipeline.
// Most members delegate straight to the enclosing RxDocumentClientImpl; executeQueryAsync
// additionally wires the optional operation listener around each query request/response.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
// Without a listener tuple the query runs plain; with one, the listener is
// notified of the request, its response, and any error, and the correlated
// activity id header is stamped onto the outgoing request.
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
// Not implemented for this adapter — returns null. Callers in the query pipeline
// do not use readFeedAsync on this code path.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    // Log the query text before handing it to the generic query pipeline.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return this.createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    // Hand the pull off to a change-feed query bound to this collection's links.
    final ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    // The paged-flux entry point just unwraps the options held by the operation state.
    return this.queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}
// Reads every document of one logical partition by issuing a partition-scoped scan query
// against the single physical partition range that owns the given partition key.
// When the end-to-end latency policy makes more than one region applicable, a scoped
// diagnostics factory is installed so diagnostics from speculative attempts are merged
// back into the caller's operation state on success, error, or cancellation.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the options so the partition-range id can be set without mutating caller state.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
DiagnosticsClientContext effectiveClientContext;
ScopedDiagnosticsFactory diagnosticsFactory;
// Fewer than two applicable regions: no speculation, use this client directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
effectiveClientContext = this;
diagnosticsFactory = null;
} else {
diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
effectiveClientContext = diagnosticsFactory;
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
effectiveClientContext,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Scan query constrained to the given logical partition.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries the whole lookup+query if the partition was split/invalidated mid-flight.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Resolve the one physical range that owns this partition key and pin
// the query to it via the options' partition-key-range id.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
effectiveClientContext,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Speculation active: merge the scoped diagnostics back on every terminal signal.
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    // Expose the shared cache of query execution plans keyed by query text.
    return this.queryPlanCache;
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Partition-key ranges are read as a non-document feed off the collection link.
    final String feedLink = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedLink);
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Options-based overload: same non-document feed, driven by raw query options.
    final String feedLink = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedLink);
}
// Validates the inputs and assembles the service request for a stored-procedure operation
// (create/upsert) under the given collection.
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
// Validates the inputs and assembles the service request for a user-defined-function
// operation under the given collection.
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    // Run the create under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Builds and dispatches the create request for a stored procedure; synchronous failures
// are surfaced as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    // Run the replace under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Builds and dispatches the replace request for a stored procedure, addressing it by its
// self link; synchronous failures are surfaced as Mono.error.
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    // Run the delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Delete request for a stored procedure.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String targetPath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure addressed by its link.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Read request for a stored procedure.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String targetPath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored-procedure feed of a collection.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures of a collection using raw query text.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
// Queries stored procedures of a collection with a parameterized query spec;
// delegates straight to the shared createQuery pipeline.
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with the given parameters.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch request against a collection.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Builds and dispatches the ExecuteJavaScript request for a stored procedure.
// Kept byte-identical: the statement order here is subtle (retry hook fires
// BEFORE partition-key resolution, and the flatMap lambda deliberately reuses
// the outer `request` rather than its own `req` parameter).
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
// Sproc execution returns JSON; force the Accept header accordingly.
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Empty body when no parameters; otherwise the serialized parameter array.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
// NOTE(review): the lambda ignores `req` and reuses the outer `request` —
// addPartitionKeyInformation appears to mutate and return the same request
// instance; confirm before refactoring.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
// Propagate the server's session token so subsequent session reads see this write.
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Builds and dispatches a transactional batch request, parsing the raw
 * service response into a {@link CosmosBatchResponse}.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());

        Mono<RxDocumentServiceRequest> requestMono =
            getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseMono = requestMono
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));

        return responseMono.map(serviceResponse ->
            BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger in the given collection.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Create request for a trigger.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());

        RxDocumentServiceRequest serviceRequest = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles the service request for a trigger operation.
 *
 * @throws IllegalArgumentException when the collection link is blank or the trigger is null
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    String targetPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, targetPath,
        trigger, headers, options);
}
/**
 * Replaces an existing trigger.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Replace request for a trigger.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        String targetPath = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, targetPath, trigger, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger addressed by its link.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Delete request for a trigger.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);

        String targetPath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger addressed by its link.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Read request for a trigger.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);

        String targetPath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the trigger feed of a collection.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers of a collection using raw query text.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, state);
}
// Queries triggers of a collection with a parameterized query spec;
// delegates straight to the shared createQuery pipeline.
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in the given collection.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Create request for a user-defined function.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());

        RxDocumentServiceRequest serviceRequest = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing user-defined function.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Replace request for a user-defined function.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);

        String targetPath = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, targetPath, udf, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user-defined function addressed by its link.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Delete request for a user-defined function.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);

        String targetPath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user-defined function addressed by its link.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Read request for a user-defined function.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);

        String targetPath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user-defined-function feed of a collection.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/**
 * Queries user-defined functions of a collection using raw query text.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, state);
}
// Queries user-defined functions of a collection with a parameterized query
// spec; delegates straight to the shared createQuery pipeline.
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads a conflict addressed by its link.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Builds and dispatches the Read request for a conflict. Unlike most siblings,
// conflicts are partition-scoped, so the request first flows through
// addPartitionKeyInformation before the retry hook fires. Kept byte-identical:
// the flatMap lambda deliberately reuses the outer `request` rather than `req`.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
// NOTE(review): `req` is ignored and the outer `request` is used —
// addPartitionKeyInformation appears to mutate and return the same request
// instance; confirm before refactoring.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads the conflict feed of a collection.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
/**
 * Queries conflicts of a collection using raw query text.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, state);
}
// Queries conflicts of a collection with a parameterized query spec;
// delegates straight to the shared createQuery pipeline.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes a conflict addressed by its link.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Builds and dispatches the Delete request for a conflict. Conflicts are
// partition-scoped, so the request first flows through addPartitionKeyInformation
// before the retry hook fires. Kept byte-identical: the flatMap lambda
// deliberately reuses the outer `request` rather than `req`.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
// NOTE(review): `req` is ignored and the outer `request` is used —
// addPartitionKeyInformation appears to mutate and return the same request
// instance; confirm before refactoring.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Creates a user in the given database.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Create request for a user.
 * Validation and request-construction failures surface as an error Mono.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        // Consistency fix: every sibling operation (upsertUserInternal,
        // createTriggerInternal, ...) notifies the retry policy before dispatch;
        // this method previously skipped that hook.
        if (documentClientRetryPolicy != null) {
            documentClientRetryPolicy.onBeforeSendRequest(request);
        }
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user in the given database.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Upsert request for a user.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());

        RxDocumentServiceRequest serviceRequest = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles the service request for a user operation.
 *
 * @throws IllegalArgumentException when the database link is blank or the user is null
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    String targetPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, targetPath, user, headers, options);
}
/**
 * Replaces an existing user.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Replace request for a user.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);

        String targetPath = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, targetPath, user, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user addressed by its link.
 */
// Consistency fix: every sibling CRUD method in this class carries @Override;
// this method implements the same client interface and was missing it.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and dispatches the Delete request for a user.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);

        String targetPath = Utils.joinPath(userLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user addressed by its link.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and dispatches the Read request for a user.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);

        String targetPath = Utils.joinPath(userLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, targetPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user feed of a database.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedPath);
}
/**
 * Queries users of a database using raw query text.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    // Wrap the raw text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, state);
}
// Queries users of a database with a parameterized query spec;
// delegates straight to the shared createQuery pipeline.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads a client encryption key addressed by its link.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the read request for a client encryption key.
 * Failures (including argument validation) are returned as an error Mono.
 *
 * @param clientEncryptionKeyLink link of the key; must be non-empty.
 * @param options                 optional request options for header construction.
 * @param retryPolicyInstance     retry policy notified before dispatch; may be null.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Creates a client encryption key under the given database, with session-token-reset retries. */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds the Create request for a client encryption key and issues it.
 * Argument validation happens inside {@code getClientEncryptionKeyRequest};
 * any failure is returned as an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates arguments and builds a service request for a client encryption key operation.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty or {@code clientEncryptionKey} is null.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
/** Replaces a client encryption key addressed by its name-based link, with session-token-reset retries. */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
        nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Replace request for a client encryption key.
 * Failures (including the null-argument check) come back as an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all client encryption keys of a database as a paged feed.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
        Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
/** Queries client encryption keys of a database with a parameterized query spec. */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a permission under the given user, with session-token-reset retries.
 *
 * @param userLink   link of the owning user.
 * @param permission permission to create.
 * @param options    optional request options.
 * @return a Mono emitting the created {@link Permission} response, or an error.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    // Reuse the same retry-policy instance for both the callback and the retry
    // driver (matches every sibling create/upsert/replace/delete method). The
    // previous code created a second, unrelated policy instance for the driver,
    // so the policy that drove retries was not the one given to the callback.
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Builds the Create request for a permission (validation happens inside
 * {@code getPermissionRequest}) and issues it; failures become an error Mono.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Upserts a permission under the given user, with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds the Upsert request for a permission and issues it; failures become
 * an error Mono. The retry policy (if any) observes the request before dispatch.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates arguments and builds a service request for a permission operation.
 *
 * @throws IllegalArgumentException if {@code userLink} is empty or {@code permission} is null.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
/** Replaces a permission (addressed by its self-link), with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Replace request for a permission, using the
 * permission's self-link as the target path; failures become an error Mono.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Deletes a permission by link, with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for a permission; failures (including
 * argument validation) become an error Mono.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads a permission by link, with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for a permission; failures (including
 * argument validation) become an error Mono.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all permissions of a user as a paged feed.
 *
 * @throws IllegalArgumentException if {@code userLink} is empty.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
        Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
/** Queries permissions with a raw query string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       QueryFeedOperationState state) {
    return queryPermissions(userLink, new SqlQuerySpec(query), state);
}
/** Queries permissions of a user with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       QueryFeedOperationState state) {
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
/** Replaces an offer (throughput resource), with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Builds and issues the Replace request for an offer using the offer's
 * self-link; failures become an error Mono. Offer requests carry no
 * custom headers or options (both passed as null).
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads an offer by link, with session-token-reset retries. */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Read request for an offer; failures become an error
 * Mono. The explicit HashMap cast disambiguates the overloaded create(...)
 * when passing null headers.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads all offers in the account as a paged feed. */
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/** Convenience overload: extracts query options from the operation state and delegates. */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}
/** Wraps the read-feed pipeline with a session-token-reset retry policy. */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
        retryPolicy);
}
/**
 * Core ReadFeed pagination for non-document resources (users, permissions,
 * offers, keys, ...). Builds one request per page, carrying forward the
 * continuation token, and maps each raw response to a typed FeedResponse page.
 *
 * @param options      query options; null is replaced with defaults.
 * @param resourceType resource to enumerate; must NOT be Document (asserted).
 * @param klass        item type for deserialization.
 * @param resourceLink feed link of the resource collection.
 * @param retryPolicy  policy notified before each page request is sent.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink,
    DocumentClientRetryPolicy retryPolicy) {

    final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
    // -1 means "server default page size".
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;

    assert(resourceType != ResourceType.Document);

    // Page-request factory: only the continuation token varies between pages.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };

    // Executes one page request and converts the payload to a typed page.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
        request -> readFeed(request)
            .map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(nonNullOptions, klass),
                klass));

    return Paginator
        .getPaginatedQueryResultAsObservable(
            nonNullOptions,
            createRequestFunc,
            executeFunc,
            maxPageSize);
}
/** Queries offers with a raw query string; delegates to the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    return queryOffers(new SqlQuerySpec(query), state);
}
/** Queries account-level offers; offers have no parent link, hence the null. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
/** Reads the database-account metadata, with session-token-reset retries. */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Issues the account-metadata Read request (empty path targets the account
 * root); failures become an error Mono. The HashMap cast disambiguates the
 * overloaded create(...) when passing null headers.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Returns the session container (typed as Object for bridge-layer callers). */
public Object getSession() {
    return this.sessionContainer;
}
/**
 * Replaces the session container.
 * The argument must be a {@link SessionContainer}; the unchecked cast will
 * throw ClassCastException for any other type.
 */
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the client-side collection metadata cache. */
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
/** Returns the partition-key-range metadata cache. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
/** Returns the global endpoint manager handling regional routing. */
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}
/** Builds a fresh AddressSelector over the client's address resolver and protocol. */
@Override
public AddressSelector getAddressSelector() {
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
/**
 * Reads account metadata from a specific regional endpoint (used during
 * endpoint discovery). Side effect: refreshes {@code useMultipleWriteLocations}
 * from the returned account and the connection policy. Request construction is
 * deferred so each subscription builds a fresh request.
 *
 * @param endpoint the regional endpoint to query directly (overrides routing).
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request to route.
 * @return the gateway proxy or the direct store model, depending on resource
 *         type and operation type.
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request override always wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Metadata-ish resources are always gateway-served.
    if (resourceType == ResourceType.Offer ||
        resourceType == ResourceType.ClientEncryptionKey ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        return this.gatewayProxy;
    }
    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        // Creating control-plane resources goes through gateway; data goes direct.
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Cross-partition queries/feeds without a target partition identity
        // must fan out via gateway; everything else is served directly.
        if ((operationType == OperationType.Query ||
            operationType == OperationType.SqlQuery ||
            operationType == OperationType.ReadFeed) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null &&
                request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}
/**
 * Closes the client and releases all underlying resources exactly once.
 * Subsequent calls only log a warning. Closing is best-effort: each component
 * is shut down quietly so one failure does not prevent the others.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    // getAndSet guarantees the shutdown sequence runs at most once.
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // Safe close: the enabled flag and the store reference are assigned
        // separately in enableThroughputControlGroup, so guard against a null
        // store even when the flag is already true.
        if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
/** Returns the deserializer used to map payloads to item types. */
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
/**
 * Registers a throughput-control group, lazily initializing the shared
 * ThroughputControlStore on first use (compareAndSet makes the init one-shot;
 * the method is synchronized so concurrent callers see a fully wired store).
 *
 * @param group               the control group; must not be null.
 * @param throughputQueryMono mono resolving the configured throughput.
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        // Wire the store into whichever transport path this client uses.
        if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
/** Delegates proactive connection warm-up and cache initialization to the store model. */
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
/** Returns the account's default consistency level as read from the gateway configuration. */
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * In direct mode the injector is wired into both the store model and the
 * address resolver; the gateway proxy is always configured.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
/** Forwards the warm-up-completed signal for the given containers to the store model. */
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
/** Forwards the warm-up-started signal for the given containers to the store model. */
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
/** Returns the credential (master key or resource token) this client authenticates with. */
@Override
public String getMasterKeyOrResourceToken() {
    return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<selector> = @pkValue}.
 * The partition key value is bound as a parameter rather than inlined.
 *
 * @param partitionKeySelector property path selector (e.g. {@code ["pk"]}) appended to the alias.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {

    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);

    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));

    String queryText = "SELECT * FROM c WHERE"
        + " c"
        + partitionKeySelector
        + " = "
        + pkParamName;

    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the feed ranges (one per physical partition) of a container,
 * retrying through an InvalidPartitionException policy so a stale name cache
 * triggers a refresh and a retry.
 *
 * @param collectionLink link of the container whose ranges are requested.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink),
        invalidPartitionExceptionRetryPolicy);
}
/**
 * Resolves the collection, then maps its overlapping partition key ranges
 * (over the full key space) to feed ranges.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty.
 */
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
    logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
/**
 * Converts a list of partition key ranges to feed ranges. A null list means
 * the cached collection metadata is stale: force a name-cache refresh and
 * throw so the surrounding retry policy re-resolves and retries.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
    if (partitionKeyRangeList == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>();
    partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
    return feedRanges;
}
/** Wraps a partition key range's EPK range as a feed range. */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    long mostSignificantBits = random.nextLong();
    long leastSignificantBits = random.nextLong();
    return randomUuid(mostSignificantBits, leastSignificantBits);
}
static UUID randomUuid(long msb, long lsb) {
msb &= 0xffffffffffff0fffL;
msb |= 0x0000000000004000L;
lsb &= 0x3fffffffffffffffL;
lsb |= 0x8000000000000000L;
return new UUID(msb, lsb);
}
/**
 * Convenience overload: applies the availability strategy using this client
 * as the diagnostics factory.
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {

    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}
// Wraps a document point operation with the threshold-based availability strategy
// ("hedging"): the operation is started in the primary region immediately and, after
// threshold (+ thresholdStep per extra region) delays, speculatively in further regions.
// The first region to produce a value (success or a non-transient error) wins; losers are
// cancelled by Mono.firstWithValue. Falls through to a plain single-region call when
// hedging is not applicable (fewer than two applicable regions).
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two candidate regions -> no hedging; execute directly against the inner factory.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
// Safe cast: getApplicableRegionsForSpeculation only returns >= 2 regions for this strategy type.
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
// Scoped factory collects diagnostics across all speculative attempts so they can be
// merged exactly once into the winning operation's diagnostics context.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
// Each attempt gets its own options copy so excluded regions can differ per attempt.
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First (primary) attempt: runs across all regions, so ANY CosmosException is
// treated as a terminal result (no other attempt would fare better).
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: pin it to 'region' by excluding every other applicable region.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
// Only NON-transient CosmosExceptions complete this attempt; transient errors
// propagate so firstWithValue can still pick another attempt's value.
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger: threshold + (attemptIndex - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// firstWithValue: the first attempt that EMITS A VALUE wins; attempts that error
// transiently do not. All-error -> NoSuchElementException handled in onErrorMap below.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
// merge() is idempotent (CAS-guarded in ScopedDiagnosticsFactory), so the extra
// merge calls in onErrorMap/doOnCancel below are safe.
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
if (exception instanceof NoSuchElementException) {
// Every hedged attempt failed; surface the first CosmosException found among
// the composite's inner errors (ordered by region).
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the throwable - after unwrapping Reactor composite/wrapped exceptions -
// is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// True when the (unwrapped) throwable is a CosmosException whose status/sub-status is
// classified as non-transient for hedging, i.e. no other region could do better.
private static boolean isNonTransientCosmosException(Throwable t) {
    Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
/**
 * Builds the excluded-region list that pins a hedged attempt to one region: the caller's
 * original exclusions plus every applicable region other than {@code currentRegion}.
 *
 * @param initialExcludedRegions exclusions from the original request options (may be null).
 * @param applicableRegions all regions participating in hedging.
 * @param currentRegion the single region this attempt should target.
 * @return a new mutable list of regions to exclude for this attempt.
 */
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    List<String> merged = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();
    for (String candidate : applicableRegions) {
        if (!candidate.equals(currentRegion)) {
            merged.add(candidate);
        }
    }
    return merged;
}
/**
 * Classifies a status/sub-status pair as non-transient for hedging purposes, meaning a
 * hedged attempt that produced it should be treated as the final result (retrying in
 * another region would not change the outcome).
 *
 * @param statusCode HTTP status code of the attempt.
 * @param subStatusCode Cosmos sub-status code of the attempt.
 * @return true when the result is terminal for hedging.
 */
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Anything below 400 is a success - always terminal.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // 408 with the client-side operation-timeout sub-status is raised locally; another
    // region would time out the same way.
    boolean isClientSideTimeout =
        statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
            && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;
    // Deterministic client errors - identical in every region.
    boolean isDeterministicClientError =
        statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;
    // Plain 404 (sub-status UNKNOWN) means the document does not exist - terminal.
    boolean isPlainNotFound =
        statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
    return isClientSideTimeout || isDeterministicClientError || isPlainNotFound;
}
// Returns the override diagnostics context when one is supplied, otherwise this client.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by the preference list, if any.
 *
 * @param operationType the operation type; read-only operations use the read endpoints,
 *                      write operations the write endpoints.
 * @param excludedRegions regions to exclude from the result (may be null).
 * @return the applicable endpoints ordered by preference list, or an empty list for
 *         operation types that are neither read-only nor write.
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes null entries from the given endpoint list in place and returns it.
 *
 * @param orderedEffectiveEndpointsList the list to scrub; may be null.
 * @return the same list with nulls removed, or {@code EMPTY_ENDPOINT_LIST} when the
 *         input is null.
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // Single-pass in-place removal; the manual index loop this replaces was O(n^2) on
    // ArrayList because each remove(i) shifted the tail.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload: extracts the excluded regions from the request options and
// delegates to the list-based overload below.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
// Determines the ordered list of regions eligible for speculative (hedged) execution.
// Returns an empty list whenever hedging is not applicable; callers treat a result with
// fewer than two regions as "no hedging".
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
// Hedging requires an enabled end-to-end policy ...
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
// ... on document operations only ...
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
// ... and writes are only hedged when retries are idempotent-safe ...
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
// ... and the account supports multi-region writes ...
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
// ... and only for the threshold-based strategy.
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Region-name comparison is case-insensitive: normalize exclusions to lower case.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
// NOTE(review): getRegionName(...) is assumed to return non-null for every applicable
// endpoint - confirm; a null would NPE in toLowerCase below.
endpoints.forEach(uri -> {
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
// Feed/query analogue of wrapPointOperationWithAvailabilityStrategy: executes the feed
// operation with threshold-based hedging across applicable regions. Unlike the point
// variant, no ScopedDiagnosticsFactory is used here - each cloned request carries its own
// diagnostics. The first attempt that emits a value (success or non-transient error) wins.
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = req
.requestContext
.getEndToEndOperationLatencyPolicyConfig();
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Fewer than two candidate regions -> no hedging; run the feed operation directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
// Safe cast: getApplicableRegionsForSpeculation only returns >= 2 regions for this strategy type.
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
// Each attempt operates on its own request clone so per-attempt region exclusions
// do not leak into other attempts.
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// Primary attempt spans all regions: ANY CosmosException is terminal.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt pinned to 'region'; only non-transient errors complete it.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger: threshold + (attemptIndex - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First attempt that EMITS A VALUE wins; when every attempt errors transiently,
// firstWithValue fails with NoSuchElementException handled below.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
if (exception instanceof NoSuchElementException) {
// All hedged attempts failed: surface the first CosmosException among the
// composite's inner errors (ordered by region).
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
// Callback abstraction for a single document point operation attempt; invoked once per
// (possibly hedged) regional attempt with attempt-specific options and diagnostics context.
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Terminal outcome of one hedged point-operation attempt. The two constructors enforce
// the invariant that exactly one of 'response' (success) or 'exception' (non-transient
// failure) is non-null.
private static class NonTransientPointOperationResult {
private final ResourceResponse<Document> response;
private final CosmosException exception;
public NonTransientPointOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
public NonTransientPointOperationResult(ResourceResponse<Document> response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
// True when this result wraps a non-transient error rather than a response.
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public ResourceResponse<Document> getResponse() {
return this.response;
}
}
// Generic feed/query counterpart of NonTransientPointOperationResult: holds either the
// successful result of type T or the non-transient CosmosException - never both, never neither.
private static class NonTransientFeedOperationResult<T> {
private final T response;
private final CosmosException exception;
public NonTransientFeedOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
public NonTransientFeedOperationResult(T response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
// True when this result wraps a non-transient error rather than a response.
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public T getResponse() {
return this.response;
}
}
// Diagnostics factory scoped to one hedged operation: records every CosmosDiagnostics it
// creates so that, once a winner is known, all attempts' diagnostics can be merged into a
// single CosmosDiagnosticsContext. merge(...) is guarded by a CAS so concurrent/duplicate
// merge calls (success path, error path, cancellation) fold the diagnostics exactly once.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
private final AtomicBoolean isMerged = new AtomicBoolean(false);
private final DiagnosticsClientContext inner;
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
private final boolean shouldCaptureAllFeedDiagnostics;
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
@Override
public CosmosDiagnostics createDiagnostics() {
// Delegate creation but remember every instance for the eventual merge.
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
// Merges using the diagnostics context snapshot from the request options when one
// exists; otherwise falls back to scanning the recorded diagnostics (see merge below).
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
public void merge(CosmosDiagnosticsContext knownCtx) {
// Idempotence guard: only the first caller performs the merge.
if (!isMerged.compareAndSet(false, true)) {
return;
}
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
// No snapshot supplied: use the first recorded diagnostics that already has a context.
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
if (ctx == null) {
return;
}
// Attach every context-less, non-empty diagnostics to the chosen context.
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
// Mark feed diagnostics as captured so the paged flux does not drop them.
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
// Clears recorded diagnostics and re-arms the merge guard for reuse.
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// Shared immutable empty results for the speculation/endpoint helpers.
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
// Bridge accessors into azure-cosmos public types' package-private internals.
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
// Process-wide client bookkeeping (diagnostics): machine id, live-client count, per-endpoint map.
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Full EPK range [min, max) - used to resolve ALL overlapping partition key ranges.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// Authentication / endpoint configuration (immutable after construction where final).
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// Session consistency and metadata caches (initialized during client init).
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
// Routing, retry and transport plumbing.
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
// Public constructor (no TokenCredential): delegates to the overload below with a null
// token credential, then wires the custom authorization token resolver afterwards.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
// Public constructor accepting both an AzureKeyCredential and a TokenCredential:
// delegates to the private permission-feed constructor, then wires the resolver.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor handling an optional permission feed: delegates core initialization
 * to the package-private constructor, then builds the resource-token map keyed by resource
 * id / full name so per-partition resource tokens can be resolved for authorization.
 *
 * @throws IllegalArgumentException when a permission has an empty or unparseable resource
 *                                  link, or when a non-empty feed produces an empty map.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the previous get/null-check/put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first genuine resource token for later authorization fallbacks.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
// Core package-private constructor: performs all real initialization (auth mode
// selection, connection policy, session container, endpoint manager, retry policy,
// telemetry registration). On any runtime failure it closes the partially-built client
// before rethrowing so counters and monitors are released.
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Process-wide bookkeeping: live-client count, per-client id, correlation id default.
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Auth mode selection, in priority order: explicit key credential, resource token,
// raw master key (wrapped into a credential), then AAD token credential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): the scope string literal below appears truncated in this extract
// (ends at the scheme separator) - verify against the upstream source.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session capturing is only needed for SESSION consistency unless explicitly overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Release counters/monitor registrations acquired above before propagating.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
@Override
// Returns the diagnostics client configuration captured for this client instance.
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
@Override
// Creates a fresh CosmosDiagnostics for an operation; the sampling rate is read
// from this client's telemetry config via the telemetry accessor.
public CosmosDiagnostics createDiagnostics() {
return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Pushes the initialized configuration reader and metadata caches into the
// gateway proxy so gateway-mode requests can resolve collections and ranges.
private void updateGatewayProxy() {
    this.gatewayProxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    this.gatewayProxy.setCollectionCache(this.collectionCache);
    this.gatewayProxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    this.gatewayProxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes this client's collection cache into the provided metadata snapshot.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
// Wires up direct-connectivity components: a global address resolver (replica
// address lookup) and the store client factory, then builds the store model.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
// NOTE(review): this argument is passed as null — confirm which collaborator it
// stands for and whether null is intended here.
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
// Adapts this client to the DatabaseAccountManagerInternal interface by
// delegating endpoint, account-lookup and connection-policy calls back to it.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; package-private so tests/subcomponents
// can override construction. All collaborators are passed through unchanged.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
// Builds the HTTP client for gateway calls from the connection policy's pool
// size, idle timeout, proxy and request timeout. When connection sharing across
// clients is enabled, a shared instance is returned instead of a dedicated one.
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: record its config in diagnostics for supportability.
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }

    return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
}
// Creates the server (direct-mode) store model backed by a new StoreClient.
// NOTE(review): subscribeRntbdStatus is not read in this visible body — confirm
// whether the flag is still needed by callers.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
@Override
// The service endpoint URI this client targets.
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
@Override
// The effective connection policy for this client.
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
@Override
// Whether writes return the full resource body by default for this client.
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
@Override
// The consistency level configured for this client.
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
@Override
// The client telemetry collector associated with this client.
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
@Override
// Correlation id used to tie diagnostics/telemetry back to this client instance.
public String getClientCorrelationId() {
return this.clientCorrelationId;
}
@Override
// Returns the machine id recorded in the diagnostics client config, or null
// when no diagnostics config is present.
public String getMachineId() {
    DiagnosticsClientConfig config = this.diagnosticsClientConfig;
    return config == null ? null : config.getMachineId();
}
@Override
// Full user-agent string (base agent plus any configured suffix).
public String getUserAgent() {
return this.userAgentContainer.getUserAgent();
}
@Override
// Creates a database resource. A fresh retry policy instance is created per
// call and drives both the inline attempt and any retries.
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the Create-Database request: validates the resource,
// serializes it (recording serialization timing diagnostics), lets the retry
// policy observe the request, then maps the raw response to a typed one.
// Any synchronous failure is converted to an error Mono.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Capture serialization wall-clock time for diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Deletes a database resource; a per-call retry policy instance drives retries.
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the Delete-Database request for the given link; synchronous
// failures are converted to an error Mono.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
// Let the retry policy inspect/annotate the request before it is sent.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Reads a database resource; a per-call retry policy instance drives retries.
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the Read-Database request for the given link; synchronous
// failures are converted to an error Mono.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Reads the database feed for this account (non-document read-feed path).
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
// Maps a parent resource link plus a resource type to the link used for
// querying that resource type. Database and Offer feeds are account-rooted;
// every other supported type is a child path segment under the parent link.
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String childPathSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childPathSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childPathSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childPathSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childPathSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childPathSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childPathSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childPathSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childPathSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childPathSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childPathSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childPathSegment);
}
// Null-safe extraction of the operation context/listener tuple from query options.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
// Null-safe extraction of the operation context/listener tuple from request options.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload: uses this client itself as the diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum) {
return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
// Sets up a cross-partition query: resolves the query link for the resource
// type, picks (or generates) the correlation activity id, wraps execution in an
// InvalidPartitionException retry policy, and scopes diagnostics so they are
// merged back into the operation state on success, error, and cancellation.
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
// Prefer a caller-supplied correlation activity id; otherwise generate one.
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
// Shared flag so the timeout path can mark the query as cancelled.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
// Merge diagnostics on every emitted page…
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
// …and on failure…
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
// …and when the subscriber cancels, so nothing captured is lost.
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Creates the document query execution context(s) and executes them, attaching
// query-plan info to the first page and applying an end-to-end timeout when an
// end-to-end latency policy is enabled.
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
// Tracks whether the next emitted page is the first, to attach plan diagnostics once.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// Only the first page carries the query-plan diagnostics context.
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
// Merges the client-side request statistics of all diagnostics captured for
// cancelled requests (tracked on the query options) into a single aggregated
// CosmosDiagnostics and attaches it to the given exception, so callers of a
// timed-out query see the full request history.
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregratedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);

                    // BUGFIX: this previously read the statistics from 'first' again,
                    // so each reduction step merged 'first' into itself and the
                    // statistics of 'toBeMerged' were silently dropped.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);

                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get();

        BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
    }
}
// Applies the end-to-end operation timeout to the feed flux. TimeoutException
// from Flux.timeout is translated into a CosmosException: a dedicated
// negative-timeout exception when the configured duration is negative
// (which causes the timeout to fire immediately), otherwise
// OperationCancelledException. In both cases the query is flagged as cancelled
// and the diagnostics of the cancelled requests are merged onto the exception.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
Flux<FeedResponse<T>> feedResponseFlux,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
CosmosQueryRequestOptions requestOptions,
final AtomicBoolean isQueryCancelledOnTimeout) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
cancellationException.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
return cancellationException;
}
return throwable;
});
}
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnostics(requestOptions, exception);
return exception;
}
return throwable;
});
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec.
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, state);
}
@Override
// Queries the account's database feed with the given query spec.
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
@Override
// Creates a collection under the given database; a per-call retry policy
// instance drives retries.
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
DocumentCollection collection, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the Create-Collection request: validates inputs, serializes
// the collection (recording serialization diagnostics), and on success records
// the response's session token so subsequent session reads see this write.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// NOTE(review): getResource() is dereferenced without a null check here,
// unlike replaceCollectionInternal which guards it — confirm a create
// response always carries a resource body.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Replaces a collection definition; a per-call retry policy instance drives retries.
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the Replace-Collection request; on success records the
// response's session token (guarded against a missing resource body).
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Deletes a collection; a per-call retry policy instance drives retries.
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the Delete-Collection request for the given link;
// synchronous failures are converted to an error Mono.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Populates headers and issues a DELETE through the appropriate store proxy.
// When this is a retry attempt, the retry context's end time is updated first.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Populates headers and issues the delete-all-items-by-partition-key request
// (a POST) through the appropriate store proxy; updates the retry context's
// end time on retry attempts.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
// Populates headers and issues a GET through the appropriate store proxy.
// When this is a retry attempt, the retry context's end time is updated first.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel storeProxy = getStoreProxy(requestPopulated);
            return storeProxy.processMessage(requestPopulated);
        });
}
// Populates headers and issues a feed read (GET) through the appropriate store proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = getStoreProxy(requestPopulated);
            return storeProxy.processMessage(requestPopulated);
        });
}
// Populates headers and issues a query (POST); the session token from the
// response is captured so subsequent session-consistent reads observe it.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated ->
this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
.map(response -> {
this.captureSessionToken(requestPopulated, response);
return response;
}
));
}
@Override
// Reads a collection; a per-call retry policy instance drives retries.
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and issues the Read-Collection request for the given link;
// synchronous failures are converted to an error Mono.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
// Reads the collection feed of a database; fails fast on a blank database link.
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String collectionsLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsLink);
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec.
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
QueryFeedOperationState state) {
return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
// Queries the collection feed of a database with the given query spec.
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
// Serializes stored-procedure parameters into a JSON array string.
// JsonSerializable values use the SDK serializer; everything else goes through
// the shared Jackson mapper. Throws IllegalArgumentException (wrapping the
// original IOException) when a value cannot be serialized.
private static String serializeProcedureParams(List<Object> objectArray) {
    final int count = objectArray.size();
    String[] serializedParams = new String[count];
    for (int index = 0; index < count; index++) {
        Object param = objectArray.get(index);
        if (param instanceof JsonSerializable) {
            serializedParams[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
            continue;
        }
        try {
            serializedParams[index] = mapper.writeValueAsString(param);
        } catch (IOException e) {
            throw new IllegalArgumentException("Can't serialize the object into the json string", e);
        }
    }
    return "[" + StringUtils.join(serializedParams, ",") + "]";
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the HTTP request headers for an operation, layering client-level defaults
 * first and then per-request {@link RequestOptions} overrides on top.
 *
 * @param options       per-request options; may be null (defaults only).
 * @param resourceType  resource being addressed (used for document-write specific headers).
 * @param operationType operation being performed.
 * @return mutable map of header name to value for this request.
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    // Client-level defaults; option-level values below may override them.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the client-wide content-response-on-write
        // preference can apply (document writes return minimal payload when disabled).
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Caller-supplied custom headers are applied verbatim (and may be overridden below).
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request setting takes precedence over the client-level flag.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    // Optimistic-concurrency preconditions.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    // Request-level consistency overrides the client default set above.
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Trigger lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Legacy offer headers: explicit throughput wins over the named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties-based offer (manual or autoscale) — only when no legacy
    // offer throughput was specified.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Fixed throughput and autoscale settings are mutually exclusive.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    // Dedicated gateway (integrated cache) directives.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
// Exposes the retry-policy factory used to create per-request retry policies
// (the same factory used internally by the document CRUD paths).
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection and then stamps the partition-key header onto the
 * request, returning the same request instance once populated.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    // The collection's partition key definition is required to compute the header value.
    Mono<Utils.ValueHolder<DocumentCollection>> resolvedCollection =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return resolvedCollection.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Variant that takes an already-started collection resolution; stamps the
 * partition-key header once the collection is available.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        DocumentCollection resolved = holder.v;
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolved);
        return request;
    });
}
/**
 * Computes the effective partition key for the operation and sets it on the request
 * (both the internal representation and the wire header).
 *
 * Resolution order: explicit PartitionKey.NONE in options, explicit partition key in
 * options, empty key for non-partitioned collections, otherwise extraction from the
 * document payload itself.
 *
 * @throws UnsupportedOperationException if no partition key can be determined.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Explicit NONE maps to the collection-specific "none" key.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection has no partition key definition — use the empty key.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the key from the document body, materializing it if necessary.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }
        // Time the extraction so it shows up in serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }
    request.setPartitionKeyInternal(partitionKeyInternal);
    // Header value must be ASCII-safe on the wire.
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the service request for a document Create/Upsert: serializes the payload,
 * records serialization diagnostics, applies request options, and resolves the
 * partition key before the request is sent.
 *
 * @return Mono that yields the fully-populated request (partition key included).
 * @throws IllegalArgumentException if the collection link is empty or the document is null.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                String documentCollectionLink,
                                                                Object document,
                                                                RequestOptions options,
                                                                boolean disableAutomaticIdGeneration,
                                                                OperationType operationType,
                                                                DiagnosticsClientContext clientContextOverride) {
    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    // Time payload serialization for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);
    if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    // Let the retry policy observe the request before it is dispatched.
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Partition key resolution needs the collection; resolve it asynchronously.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-serialized
 * batch body, records serialization diagnostics, applies request options, and adds
 * the batch routing headers once the collection is resolved.
 *
 * @return Mono that yields the fully-populated batch request.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // The batch body is already serialized; just wrap it and time the copy for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    // Apply exclude-regions once, right after request creation (a second, redundant
    // call further down was removed).
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Batch routing headers depend on the resolved collection (partition key definition).
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Stamps batch-specific routing and control headers on the request: the partition
 * key (single-partition batch) or the partition key range identity (range batch),
 * plus the batch marker, atomicity and continue-on-error flags.
 *
 * @throws UnsupportedOperationException for unknown ServerBatchRequest subtypes.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            // NONE maps to the collection-specific "none" partition key.
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        // Range-scoped batches route by partition key range id instead of a key value.
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
 * Populates the standard request headers (date, authorization, API type, SDK
 * capabilities, content type, accept) and, for feed-range-filtered reads, the
 * feed-range routing headers.
 *
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    // The x-date header participates in key-based authorization signing.
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();
        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // Token is sent URL-encoded.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }
    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }
    this.populateCapabilitiesHeader(request);
    // Default content types: JSON for POST/PUT bodies, JSON-patch for PATCH.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }
    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }
    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
    if (this.requiresFeedRangeFiltering(request)) {
        // Feed-range filtering needs collection + partition key range info before
        // the AAD authorization header (if any) is applied.
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }
    return this.populateAuthorizationHeader(request);
}
// Advertises the SDK's supported capabilities, unless the header was already set
// on this request.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        return;
    }
    headers.put(
        HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
        HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
}
// Feed-range filtering headers are only needed for document/conflict feed reads and
// queries that actually carry a feed range.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
/**
 * Adds the AAD authorization header to the request when the client uses AAD-token
 * auth; other auth modes are handled earlier (key/token signing) and pass through.
 *
 * @throws IllegalArgumentException if request is null.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        // Nothing to do for non-AAD auth — header was already populated synchronously.
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * HttpHeaders variant: sets the AAD authorization header when the client uses
 * AAD-token auth; otherwise returns the headers unchanged.
 *
 * @throws IllegalArgumentException if httpHeaders is null.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
// Returns the authorization mode (e.g. primary master key, resource token, AAD token)
// this client instance was configured with.
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}
/**
 * Produces the authorization token for a request, trying the configured auth
 * mechanisms in priority order: custom token resolver, key credential signing,
 * single resource token, then the per-resource token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {
    if (this.cosmosAuthorizationTokenResolver != null) {
        // Custom resolver gets an unmodifiable view of the request properties.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Database-account reads fall back to the first token from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}
// Maps a wire-level ResourceType to the public CosmosResourceType; unknown types
// fall back to SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Stores the session token from the response so later session-consistent requests
// can send it back to the service.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// Sends a POST (create) through the store proxy once the request headers are
// populated; updates retry timing when this is a retry attempt.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// An upsert is a POST with the IS_UPSERT header set; on success the returned
// session token is captured for session consistency.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> populatedHeaders = populatedRequest.getHeaders();
            assert (populatedHeaders != null);
            populatedHeaders.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(storeResponse -> {
                    this.captureSessionToken(populatedRequest, storeResponse);
                    return storeResponse;
                });
        });
}
// Sends a PUT (replace) through the store proxy once headers are populated.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Sends a PATCH through the store proxy once headers are populated.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Creates a document, wrapping the core operation with the availability strategy
 * (cross-region hedging) when configured. Non-idempotent write retries are only
 * enabled when explicitly requested via options.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Sets up the retry policy for a document create and delegates to the internal
 * implementation. When no partition key is supplied, wraps the policy so a
 * partition-key-definition mismatch triggers a retry after cache refresh.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                endToEndPolicyConfig,
                clientContextOverride),
        requestRetryPolicy);
}
/**
 * Builds and executes the create-document request, applying the end-to-end timeout
 * policy; synchronous failures are surfaced as a Mono error.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> {
                // Wrap the store call with the end-to-end latency timeout (if enabled).
                Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options));
                return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
            });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Applies the end-to-end operation timeout to the response Mono when the policy is
 * enabled: negative timeouts fail immediately; otherwise a reactor timeout is mapped
 * to an operation-cancelled error carrying diagnostics.
 */
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
    RxDocumentServiceRequest request,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono) {
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Fail fast — a negative timeout can never be met.
            return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
        }
        request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationException(request, throwable));
    }
    return rxDocumentServiceResponseMono;
}
/**
 * Maps a reactor TimeoutException into an OperationCancelledException carrying the
 * request diagnostics; all other throwables pass through unchanged.
 */
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
    Throwable unwrapped = reactor.core.Exceptions.unwrap(throwable);
    if (!(unwrapped instanceof TimeoutException)) {
        return throwable;
    }
    CosmosException cancellation = new OperationCancelledException();
    // Preserve where the timeout was observed.
    cancellation.setStackTrace(throwable.getStackTrace());
    if (request.requestContext == null) {
        // Without a request context there are no diagnostics to attach.
        return throwable;
    }
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
/**
 * Builds the OperationCancelledException used when a caller configures a negative
 * end-to-end timeout; attaches request diagnostics when available.
 *
 * @throws IllegalArgumentException if negativeTimeout is null or not negative.
 */
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
    CosmosException exception = new OperationCancelledException(message, null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (request != null && request.requestContext != null) {
        request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
        return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
    }
    return exception;
}
/**
 * Upserts a document, wrapping the core operation with the availability strategy
 * when configured.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Sets up the retry policy for a document upsert (adding partition-key-mismatch
 * handling when no partition key is supplied) and delegates to the internal
 * implementation.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            finalRetryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride),
        finalRetryPolicyInstance);
}
/**
 * Builds and executes the upsert request, applying the end-to-end timeout policy;
 * synchronous failures are surfaced as a Mono error.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
        Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
        });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its link, wrapping the core operation with the
 * availability strategy when configured.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}
/**
 * Sets up the retry policy for a link-addressed document replace (adding
 * partition-key-mismatch handling when no partition key is supplied) and delegates
 * to the internal implementation.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // Derive the collection link from the document link for the mismatch policy.
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            documentLink,
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Validates the inputs, converts the payload to a typed Document, and delegates to
 * the Document-typed replace implementation; synchronous failures are surfaced as a
 * Mono error.
 *
 * @throws IllegalArgumentException (as a Mono error) if documentLink is empty or document is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Pass the throwable as the last argument so the stack trace is logged,
        // consistent with createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document identified by its self-link, routing through the
 * point-operation availability strategy.
 *
 * @param document the document carrying both the self-link and the new content.
 * @param options request options; may be null.
 * @return a Mono emitting the resource response of the replaced document.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
        boolean nonIdempotentRetriesEnabled =
            options != null && options.getNonIdempotentWriteRetriesEnabled();
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Replace,
            (effectiveOptions, e2eConfig, ctxOverride) -> replaceDocumentCore(
                document,
                effectiveOptions,
                e2eConfig,
                ctxOverride),
            options,
            nonIdempotentRetriesEnabled);
}
/**
 * Core self-link-based replace: builds the retry chain (session-token reset, plus
 * a partition-key-mismatch retry when no partition key was supplied) and executes
 * the internal replace through the retry helper.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
        DocumentClientRetryPolicy retryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
        if (options == null || options.getPartitionKey() == null) {
            // No explicit partition key: guard against partition-key definition
            // mismatches with the dedicated retry policy.
            retryPolicy = new PartitionKeyMismatchRetryPolicy(
                collectionCache,
                retryPolicy,
                document.getSelfLink(),
                options);
        }
        final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                document,
                options,
                effectiveRetryPolicy,
                endToEndPolicyConfig,
                clientContextOverride),
            effectiveRetryPolicy);
}
/**
 * Validates the document and delegates to the link-based replace path using the
 * document's self-link.
 *
 * @param document the document to replace; must not be null and must carry a self-link.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy prepared by the core path.
 * @param endToEndPolicyConfig effective end-to-end latency policy, or null.
 * @param clientContextOverride diagnostics-context override used by the availability strategy.
 * @return a Mono emitting the replaced document, or an error Mono on validation failure.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
        try {
            if (document == null) {
                throw new IllegalArgumentException("document");
            }
            return this.replaceDocumentInternal(
                document.getSelfLink(),
                document,
                options,
                retryPolicyInstance,
                endToEndPolicyConfig,
                clientContextOverride);
        } catch (Exception e) {
            // Fixed copy-paste defect: this path replaces a document, not a database
            // (now consistent with the sibling overload's log message).
            logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
}
/**
 * Builds and executes the actual replace request: serializes the payload (stamping
 * the optional tracking id first), records serialization diagnostics, resolves the
 * target collection to stamp partition-key information, and runs the replace under
 * the effective end-to-end timeout policy.
 *
 * @param documentLink link of the document to replace.
 * @param document the already-typed replacement document; must not be null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy hooked into the outgoing request; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy, or null.
 * @param clientContextOverride diagnostics-context override; may be null (effective context is resolved).
 * @return a Mono emitting the resource response of the replaced document.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> requestHeaders =
            getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
        Instant serializationStartTimeUTC = Instant.now();
        // The tracking id must be written into the payload BEFORE serialization so it
        // is part of the stored document.
        if (options != null) {
            String trackingId = options.getTrackingId();
            if (trackingId != null && !trackingId.isEmpty()) {
                document.set(Constants.Properties.TRACKING_ID, trackingId);
            }
        }
        ByteBuffer content = serializeJsonToByteBuffer(document);
        Instant serializationEndTime = Instant.now();
        // Capture how long item serialization took; attached to the request diagnostics below.
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // Collection resolution is needed so addPartitionKeyInformation can stamp the
        // effective partition key onto the request.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                request);
        Mono<RxDocumentServiceRequest> requestObs =
            addPartitionKeyInformation(request, content, document, options, collectionObs);
        return requestObs.flatMap(req -> {
            // NOTE(review): the lambda parameter `req` is unused; the outer `request` is
            // issued instead — presumably the same (mutated) instance. Confirm intentional.
            Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
                .map(resp -> toResourceResponse(resp, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
}
/**
 * Resolves the end-to-end latency policy for a request: the request-level config
 * when present, otherwise the client-level default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
        CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
            options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
        return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Returns the supplied policy config when non-null, otherwise falls back to the
 * client-wide default end-to-end latency policy.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
        if (policyConfig != null) {
            return policyConfig;
        }
        return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies a set of partial-update (patch) operations to the document at the given
 * link, routed through the point-operation availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
        boolean nonIdempotentRetriesEnabled =
            options != null && options.getNonIdempotentWriteRetriesEnabled();
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Patch,
            (effectiveOptions, e2eConfig, ctxOverride) -> patchDocumentCore(
                documentLink,
                cosmosPatchOperations,
                effectiveOptions,
                e2eConfig,
                ctxOverride),
            options,
            nonIdempotentRetriesEnabled);
}
/**
 * Core patch path: acquires the session-token-reset retry policy and executes the
 * internal patch through the retry helper.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
        final DocumentClientRetryPolicy retryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                options,
                retryPolicy,
                endToEndPolicyConfig,
                clientContextOverride),
            retryPolicy);
}
/**
 * Builds and executes the patch request: serializes the patch operations, records
 * serialization diagnostics, resolves the target collection to stamp partition-key
 * information, and executes the patch under the effective end-to-end timeout policy.
 *
 * @param documentLink link of the document to patch; must be non-empty.
 * @param cosmosPatchOperations the patch operations to apply; must not be null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy hooked into the outgoing request; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy, or null.
 * @param clientContextOverride diagnostics-context override.
 * @return a Mono emitting the resource response of the patched document.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    Instant serializationStartTimeUTC = Instant.now();
    // Serialize the patch operations (not a full document) as the request body.
    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    // NOTE(review): clientContextOverride is passed to the request directly here,
    // while sibling paths (replace/delete/read) wrap it in
    // getEffectiveClientContext(...) — confirm this difference is intentional.
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection so addPartitionKeyInformation can stamp the effective
    // partition key; patch passes no content/document for PK extraction, so the key
    // must come from options.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);
    Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
        Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
        return getRxDocumentServiceResponseMonoWithE2ETimeout(
            request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
    });
    return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes the document at the given link, routed through the point-operation
 * availability strategy. No cached item payload is supplied on this overload.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
        boolean nonIdempotentRetriesEnabled =
            options != null && options.getNonIdempotentWriteRetriesEnabled();
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Delete,
            (effectiveOptions, e2eConfig, ctxOverride) -> deleteDocumentCore(
                documentLink,
                null,
                effectiveOptions,
                e2eConfig,
                ctxOverride),
            options,
            nonIdempotentRetriesEnabled);
}
/**
 * Deletes the document at the given link, supplying the already-materialized item
 * so partition-key information can be derived from it; routed through the
 * point-operation availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
        boolean nonIdempotentRetriesEnabled =
            options != null && options.getNonIdempotentWriteRetriesEnabled();
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Delete,
            (effectiveOptions, e2eConfig, ctxOverride) -> deleteDocumentCore(
                documentLink,
                internalObjectNode,
                effectiveOptions,
                e2eConfig,
                ctxOverride),
            options,
            nonIdempotentRetriesEnabled);
}
/**
 * Core delete path: acquires the session-token-reset retry policy and executes the
 * internal delete through the retry helper.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
        final DocumentClientRetryPolicy retryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                options,
                retryPolicy,
                endToEndPolicyConfig,
                clientContextOverride),
            retryPolicy);
}
/**
 * Builds and executes the delete request: resolves the target collection to stamp
 * partition-key information (optionally from the supplied item), applies retry and
 * diagnostics hooks, and executes the delete under the effective end-to-end
 * timeout policy.
 *
 * @param documentLink link of the document to delete; must be non-empty.
 * @param internalObjectNode optional already-materialized item used for partition-key derivation; may be null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy hooked into the outgoing request; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy, or null.
 * @param clientContextOverride diagnostics-context override; may be null (effective context is resolved).
 * @return a Mono emitting the resource response of the delete, or an error Mono on validation failure.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }
            logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
            if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
                request.setNonIdempotentWriteRetriesEnabled(true);
            }
            if (options != null) {
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            // Collection resolution enables partition-key stamping below.
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
                request, null, internalObjectNode, options, collectionObs);
            Mono<RxDocumentServiceResponse> responseObservable =
                requestObs.flatMap(req -> {
                    Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
                        .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
                    return getRxDocumentServiceResponseMonoWithE2ETimeout(
                        request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
                });
            return responseObservable
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
}
/**
 * Deletes every document in one logical partition of the collection.
 * NOTE(review): the {@code partitionKey} argument is not forwarded to the internal
 * path — presumably callers also carry it inside {@code options}; confirm.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
        final DocumentClientRetryPolicy retryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
            retryPolicy);
}
/**
 * Builds and executes a partition-level bulk delete (ResourceType.PartitionKey):
 * resolves the collection, stamps partition-key information from the options, and
 * issues the delete-all-items request.
 *
 * @param collectionLink link of the target collection; must be non-empty.
 * @param options request options; expected to carry the partition key — may be null.
 * @param retryPolicyInstance retry policy hooked into the outgoing request; may be null.
 * @return a Mono emitting the resource response of the bulk delete, or an error Mono on validation failure.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }
            logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            // Collection resolution enables partition-key stamping; with no
            // content/document the key is taken from options.
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
            return requestObs.flatMap(req -> this
                .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
            return Mono.error(e);
        }
}
/**
 * Reads a single document by link, using this client instance as the diagnostics
 * factory for the inner read path.
 *
 * @param documentLink link of the document to read.
 * @param options request options; may be null.
 * @return a Mono emitting the resource response of the read document.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
        return readDocument(documentLink, options, this);
}
/**
 * Read path that lets callers (e.g. readMany's point reads) supply the diagnostics
 * factory used for the inner operation; routed through the point-operation
 * availability strategy with write retries disabled (reads are idempotent).
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Read,
            (effectiveOptions, e2eConfig, ctxOverride) ->
                readDocumentCore(documentLink, effectiveOptions, e2eConfig, ctxOverride),
            options,
            false,
            innerDiagnosticsFactory);
}
/**
 * Core read path: acquires the session-token-reset retry policy and executes the
 * internal read through the retry helper.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
        final DocumentClientRetryPolicy retryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
            retryPolicy);
}
/**
 * Builds and executes the read request for a single document: resolves the target
 * collection to stamp partition-key information, applies retry and diagnostics
 * hooks, and executes the read under the effective end-to-end timeout policy.
 *
 * @param documentLink link of the document to read; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy hooked into the outgoing request; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy, or null.
 * @param clientContextOverride diagnostics-context override; may be null (effective context is resolved).
 * @return a Mono emitting the resource response of the read, or an error Mono on validation failure.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }
            logger.debug("Reading a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Read, ResourceType.Document, path, requestHeaders, options);
            // Bug fix: guard against null options before reading exclude-regions.
            // Every sibling point-operation path (replace/patch/delete) performs this
            // null check, and options is legitimately null for plain reads — the
            // unguarded call raised a NullPointerException here.
            if (options != null) {
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
            return requestObs.flatMap(req -> {
                Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
                return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
            });
        } catch (Exception e) {
            logger.debug("Failure in reading a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
}
/**
 * Reads all documents of a collection by issuing an unfiltered SELECT query.
 *
 * @throws IllegalArgumentException when collectionLink is null or empty.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        final String readAllQuery = "SELECT * FROM r";
        return queryDocuments(collectionLink, readAllQuery, state, classOfT);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs. Items are
 * bucketed by their owning partition-key range: ranges holding exactly one
 * requested item are served via point reads; ranges holding several items are
 * served via a generated IN/OR query. The per-page results are merged into a
 * single FeedResponse with aggregated request charge, query metrics and
 * diagnostics.
 *
 * @param itemIdentityList the (id, partition key) pairs to fetch.
 * @param collectionLink link of the target collection.
 * @param state feed operation state used for options and diagnostics-context aggregation.
 * @param klass the item type to deserialize into.
 * @return a Mono emitting one merged FeedResponse containing all fetched items.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {
        // Scoped factory collects diagnostics from all inner point reads/queries and
        // merges them back into the caller's context.
        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx)
        );
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request);
        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    return Mono.error(new IllegalStateException("Collection cannot be null"));
                }
                final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
                // Routing map maps each item's effective partition key to its owning range.
                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null);
                return valueHolderMono
                    .flatMap(collectionRoutingMapValueHolder -> {
                        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                        CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }
                        // Bucket every requested identity by its owning partition-key range.
                        itemIdentityList
                            .forEach(itemIdentity -> {
                                // Hierarchical (multi-hash) keys must supply all path components.
                                if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                    ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                        .getComponents().size() != pkDefinition.getPaths().size()) {
                                    throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                                }
                                String effectivePartitionKeyString = PartitionKeyInternalHelper
                                    .getEffectivePartitionKeyString(
                                        BridgeInternal.getPartitionKeyInternal(
                                            itemIdentity.getPartitionKey()),
                                        pkDefinition);
                                PartitionKeyRange range =
                                    routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                                if (partitionRangeItemKeyMap.get(range) == null) {
                                    List<CosmosItemIdentity> list = new ArrayList<>();
                                    list.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, list);
                                } else {
                                    List<CosmosItemIdentity> pairs =
                                        partitionRangeItemKeyMap.get(range);
                                    pairs.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, pairs);
                                }
                            });
                        // Multi-item ranges get a query; single-item ranges are point-read below.
                        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
                        Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                            diagnosticsFactory,
                            partitionRangeItemKeyMap,
                            resourceLink,
                            state.getQueryOptions(),
                            klass);
                        Flux<FeedResponse<Document>> queries = queryForReadMany(
                            diagnosticsFactory,
                            resourceLink,
                            new SqlQuerySpec(DUMMY_SQL_QUERY),
                            state.getQueryOptions(),
                            Document.class,
                            ResourceType.Document,
                            collection,
                            Collections.unmodifiableMap(rangeQueryMap));
                        // Merge all pages, then aggregate metrics/charge/diagnostics into one response.
                        return Flux.merge(pointReads, queries)
                            .collectList()
                            .map(feedList -> {
                                List<T> finalList = new ArrayList<>();
                                HashMap<String, String> headers = new HashMap<>();
                                ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                                Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                                double requestCharge = 0;
                                for (FeedResponse<Document> page : feedList) {
                                    ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                        ModelBridgeInternal.queryMetrics(page);
                                    if (pageQueryMetrics != null) {
                                        pageQueryMetrics.forEach(
                                            aggregatedQueryMetrics::putIfAbsent);
                                    }
                                    requestCharge += page.getRequestCharge();
                                    finalList.addAll(page.getResults().stream().map(document ->
                                        ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                    aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                                }
                                CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                    aggregatedDiagnostics, aggregateRequestStatistics);
                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    // Record the whole readMany as one successful (200) operation.
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        200,
                                        0,
                                        finalList.size(),
                                        requestCharge,
                                        aggregatedDiagnostics,
                                        null
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            aggregatedDiagnostics,
                                            ctx);
                                }
                                headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                    .toString(requestCharge));
                                FeedResponse<T> frp = BridgeInternal
                                    .createFeedResponseWithQueryMetrics(
                                        finalList,
                                        headers,
                                        aggregatedQueryMetrics,
                                        null,
                                        false,
                                        false,
                                        aggregatedDiagnostics);
                                return frp;
                            });
                    })
                    .onErrorMap(throwable -> {
                        // On failure, still record the operation (with status/substatus of
                        // the CosmosException) into the diagnostics context before rethrowing.
                        if (throwable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException)throwable;
                            CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                            if (diagnostics != null) {
                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        cosmosException.getStatusCode(),
                                        cosmosException.getSubStatusCode(),
                                        0,
                                        cosmosException.getRequestCharge(),
                                        diagnostics,
                                        throwable
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            diagnostics,
                                            state.getDiagnosticsContextSnapshot());
                                }
                            }
                            return cosmosException;
                        }
                        return throwable;
                    });
            }
        );
}
/**
 * Builds, per partition-key range, the SQL query used by readMany for ranges that
 * contain more than one requested item. Ranges with a single item get no entry —
 * they are served via point reads instead.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
        final String partitionKeySelector = createPkSelector(partitionKeyDefinition);
        final Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
        partitionRangeItemKeyMap.forEach((range, identities) -> {
            if (identities.size() <= 1) {
                return;
            }
            final SqlQuerySpec querySpec;
            if ("[\"id\"]".equals(partitionKeySelector)) {
                // Partition key path is /id: a plain IN query on c.id suffices.
                querySpec = createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector);
            } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
                querySpec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
            } else {
                querySpec = createReadManyQuerySpec(identities, partitionKeySelector);
            }
            rangeQueryMap.put(range, querySpec);
        });
        return rangeQueryMap;
}
/**
 * Builds the readMany query for the case where the partition key path is /id:
 * {@code SELECT * FROM c WHERE c.id IN ( @param0, @param1, ... )}.
 * The partitionKeySelector argument is not needed on this branch; it is kept for
 * signature parity with the other query builders.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
        final List<SqlParameter> parameters = new ArrayList<>();
        final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
        int position = 0;
        for (CosmosItemIdentity itemIdentity : idPartitionKeyPairList) {
            if (position > 0) {
                query.append(", ");
            }
            final String idParamName = "@param" + position;
            parameters.add(new SqlParameter(idParamName, itemIdentity.getId()));
            query.append(idParamName);
            position++;
        }
        query.append(" )");
        return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the readMany query for single-path (hash) partition keys:
 * {@code SELECT * FROM c WHERE ( (c.id = @p AND c[pk] = @p) OR ... )}.
 * Parameter numbering: partition-key value at index {@code 2*i}, id at {@code 2*i+1}.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
        final List<SqlParameter> parameters = new ArrayList<>();
        final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
        for (int i = 0; i < itemIdentities.size(); i++) {
            if (i > 0) {
                query.append(" OR ");
            }
            final CosmosItemIdentity itemIdentity = itemIdentities.get(i);
            final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(itemIdentity.getPartitionKey());
            final String pkParamName = "@param" + (2 * i);
            final String idParamName = "@param" + (2 * i + 1);
            parameters.add(new SqlParameter(pkParamName, pkValue));
            parameters.add(new SqlParameter(idParamName, itemIdentity.getId()));
            query.append("(")
                .append("c.id = ")
                .append(idParamName)
                .append(" AND ")
                .append(" c")
                .append(partitionKeySelector)
                .append(" = ")
                .append(pkParamName)
                .append(" )");
        }
        query.append(" )");
        return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the readMany query for hierarchical (multi-hash) partition keys:
 * {@code SELECT * FROM c WHERE ( (c.id = @pN AND c.pk1 = @p0 AND c.pk2 = @p1 ...) OR ... )}.
 * Parameters are numbered consecutively across all items: each item's sub-key
 * values first, then its id.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {
        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();
        queryStringBuilder.append("SELECT * FROM c WHERE ( ");
        int paramCount = 0;
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);
            PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
            Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
            // NOTE(review): the composite key's string form is split on '=' to recover
            // the per-path sub-key values — this breaks if a sub-key value itself
            // contains '='; confirm the serialized form guarantees '=' as separator.
            String pkValueString = (String) pkValue;
            List<List<String>> partitionKeyParams = new ArrayList<>();
            List<String> paths = partitionKeyDefinition.getPaths();
            int pathCount = 0;
            for (String subPartitionKey: pkValueString.split("=")) {
                String pkParamName = "@param" + paramCount;
                // Pair each partition-key path with the parameter holding its value.
                partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, subPartitionKey));
                paramCount++;
                pathCount++;
            }
            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));
            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c.");
                // substring(1) drops the leading '/' of the partition-key path.
                queryStringBuilder.append(pkParam.get(0).substring(1));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }
            queryStringBuilder.append(" )");
            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");
        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Converts a partition-key definition into a bracketed selector string appended
 * after {@code c} in generated queries, e.g. path {@code /pk} becomes
 * {@code ["pk"]} (nested paths concatenate: {@code ["a"]["b"]}).
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            // Drop the leading '/' of each path segment.
            .map(pathPart -> StringUtils.substring(pathPart, 1))
            // NOTE(review): this replaces '"' with a single backslash rather than an
            // escaped quote (\") — looks like an incomplete escape for quote-containing
            // path names; confirm intended before changing, as the "[\"id\"]" comparison
            // in getRangeQueryMap depends on the exact output format.
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.joining());
}
/**
 * Executes the query half of readMany: runs one generated query per multi-item
 * partition-key range (from rangeQueryMap) and streams the resulting pages.
 * Returns an empty Flux when no range needs a query.
 *
 * @param diagnosticsFactory scoped diagnostics factory shared with the readMany call.
 * @param parentResourceLink query link of the parent collection.
 * @param sqlQuery placeholder query; the per-range specs in rangeQueryMap are what execute.
 * @param options query request options.
 * @param klass result type.
 * @param resourceTypeEnum resource type being queried (documents).
 * @param collection resolved target collection.
 * @param rangeQueryMap per-range query specs built by getRangeQueryMap.
 * @return a Flux of feed pages across all queried ranges.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }
        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
        Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
                diagnosticsFactory,
                queryClient,
                collection.getResourceId(),
                sqlQuery,
                rangeQueryMap,
                options,
                collection.getResourceId(),
                parentResourceLink,
                activityId,
                klass,
                resourceTypeEnum,
                isQueryCancelledOnTimeout);
        return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Executes the point-read half of readMany: for every partition-key range that
 * holds exactly one requested item, issues a single document read and wraps the
 * result (or a 404/0 miss) as a one-item/empty FeedResponse so it can be merged
 * with the query results.
 *
 * @param diagnosticsFactory scoped diagnostics factory shared with the readMany call.
 * @param singleItemPartitionRequestMap items bucketed by owning range; only
 *        single-item buckets are point-read here (others return empty).
 * @param resourceLink collection query link used to build the document link.
 * @param queryRequestOptions caller's query options, converted to request options per read.
 * @param klass result type.
 * @return a Flux of per-item feed pages (empty page for not-found items).
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {
        return Flux.fromIterable(singleItemPartitionRequestMap.values())
            .flatMap(cosmosItemIdentityList -> {
                if (cosmosItemIdentityList.size() == 1) {
                    CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                    RequestOptions requestOptions = ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .toRequestOptions(queryRequestOptions);
                    requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                    return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                        .flatMap(resourceResponse -> Mono.just(
                            new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                        ))
                        .onErrorResume(throwable -> {
                            // A plain 404 (substatus UNKNOWN) means "item not found":
                            // carry the exception through so it becomes an empty page
                            // instead of failing the whole readMany.
                            Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                            if (unwrappedThrowable instanceof CosmosException) {
                                CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                                int statusCode = cosmosException.getStatusCode();
                                int subStatusCode = cosmosException.getSubStatusCode();
                                if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                    return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                                }
                            }
                            return Mono.error(unwrappedThrowable);
                        });
                }
                // Multi-item buckets are handled by queryForReadMany, not point reads.
                return Mono.empty();
            })
            .flatMap(resourceResponseToExceptionPair -> {
                ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
                CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
                FeedResponse<Document> feedResponse;
                if (cosmosException != null) {
                    // Not-found: empty page, but keep the read's diagnostics.
                    feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
                } else {
                    CosmosItemResponse<T> cosmosItemResponse =
                        ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                    feedResponse = ModelBridgeInternal.createFeedResponse(
                        Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                        cosmosItemResponse.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
                }
                return Mono.just(feedResponse);
            });
}
/**
 * Queries documents in a collection from a raw SQL string by wrapping it into a
 * SqlQuerySpec and delegating to the spec-based overload.
 *
 * @param collectionLink link of the collection to query.
 * @param query raw SQL query text.
 * @param state feed operation state (options and diagnostics).
 * @param classOfT result item type.
 * @return a Flux of feed pages containing the query results.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
        return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}
// Adapts this RxDocumentClientImpl into the IDocumentQueryClient interface consumed by the
// query execution pipeline. When an OperationContextAndListenerTuple is supplied,
// executeQueryAsync tags requests with the correlated activity id header and notifies the
// listener of requests, responses and exceptions.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
// Account-level default consistency as reported by the gateway configuration.
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
// Client-configured consistency override; may be null when not explicitly set.
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
// Tag the request so telemetry can correlate all requests belonging to this operation.
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
// Delegates straight to the enclosing client's availability-strategy implementation.
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// NOTE(review): intentionally returns null (unimplemented here) — confirm callers never invoke it.
return null;
}
};
}
/**
 * Queries documents with a parameterized {@link SqlQuerySpec}; logs the query before
 * handing off to the generic query pipeline.
 *
 * @param collectionLink link to the collection to query.
 * @param querySpec parameterized query specification.
 * @param state per-operation feed state (options, diagnostics).
 * @param classOfT item deserialization target type.
 * @return a Flux of feed response pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Starts a change feed query over the given collection.
 *
 * @param collection resolved collection metadata; must not be null.
 * @param changeFeedOptions change feed request options (start position, mode, ...).
 * @param classOfT item deserialization target type.
 * @return a Flux of change feed response pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    return new ChangeFeedQueryImpl<T>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions)
        .executeAsync();
}
/**
 * Change feed entry point used by the paged-flux surface; unwraps the options from the
 * operation state and delegates to {@link #queryDocumentChangeFeed}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    CosmosChangeFeedRequestOptions changeFeedOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, changeFeedOptions, classOfT);
}
/**
 * Reads all documents of a single logical partition. Implemented as a query restricted to
 * the partition's effective key range. When the end-to-end latency policy makes two or more
 * regions applicable, a scoped diagnostics factory is used so speculative attempts can be
 * merged back into the caller-visible diagnostics.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the caller's options so per-operation mutations below don't leak back.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
// Regions eligible for speculative (hedged) execution under the latency policy.
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
DiagnosticsClientContext effectiveClientContext;
ScopedDiagnosticsFactory diagnosticsFactory;
if (orderedApplicableRegionsForSpeculation.size() < 2) {
// No hedging possible: the client itself is the diagnostics context.
effectiveClientContext = this;
diagnosticsFactory = null;
} else {
// Hedging: scope diagnostics so speculative attempts can be reset/merged.
diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
effectiveClientContext = diagnosticsFactory;
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
effectiveClientContext,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Translate the "read all" into a query that scans exactly one logical partition.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries (with cache refresh) when the partition was deleted and recreated.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Resolve the single physical partition range owning this logical partition key.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
effectiveClientContext,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Hedged path: merge scoped diagnostics back into the options on every terminal path
// (next page, error, and cancellation).
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
/**
 * Exposes the client-wide cache of partitioned query execution plans keyed by query text.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return this.queryPlanCache;
}
/**
 * Reads the partition key ranges feed of a collection.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param state per-operation feed state.
 * @return a Flux of partition key range pages.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Reads the partition key ranges feed of a collection using plain query request options.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param options query request options.
 * @return a Flux of partition key range pages.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds and validates the service request for a stored procedure operation under a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param storedProcedure the stored procedure payload; must not be null.
 * @param options optional request options (may be null).
 * @param operationType the operation being performed (e.g. Create).
 * @return the populated service request.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Builds and validates the service request for a user-defined-function operation under a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param udf the UDF payload; must not be null.
 * @param options optional request options (may be null).
 * @param operationType the operation being performed (e.g. Create).
 * @return the populated service request.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure in a collection, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to create a stored procedure in a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param storedProcedure the stored procedure to create; must not be null.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the create response, or an error (invalid arguments surface as
 *         IllegalArgumentException via Mono.error).
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Build (and validate) the request BEFORE logging: previously the debug log
        // dereferenced storedProcedure.getId() first, so a null storedProcedure produced an
        // accidental NullPointerException instead of the IllegalArgumentException that
        // getStoredProcedureRequest is designed to throw.
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to replace an existing stored procedure.
 *
 * @param storedProcedure stored procedure with updated content; must not be null.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the replace response, or an error.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to delete a stored procedure.
 *
 * @param storedProcedureLink link of the stored procedure to delete; must be non-empty.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the delete response, or an error.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to read a stored procedure.
 *
 * @param storedProcedureLink link of the stored procedure to read; must be non-empty.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored procedures feed of a collection.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures with a raw SQL string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
/**
 * Queries stored procedures with a parameterized {@link SqlQuerySpec}.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with the given parameters, retrying per the
 * session-token reset policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch request against a collection, retrying per the
 * session-token reset policy.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Issues the service call to execute a stored procedure (server-side JavaScript).
// The request body is the JSON-serialized parameter list (empty string when no params),
// and the response's session token is captured before conversion.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores 'req' and reuses the outer 'request' — presumably
// addPartitionKeyInformation mutates and emits the same instance; confirm before changing.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Issues the service call for a transactional batch and parses the batch response.
 *
 * @param collectionLink link to the target collection.
 * @param serverBatchRequest the batch of operations to execute.
 * @param options optional request options (may be null).
 * @param requestRetryPolicy retry policy for the request; may be null.
 * @param disableAutomaticIdGeneration whether automatic id generation is disabled.
 * @return a Mono emitting the parsed batch response, or an error.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    DocumentClientRetryPolicy requestRetryPolicy,
    boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        Mono<RxDocumentServiceRequest> requestMono = getBatchDocumentRequest(
            requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        return requestMono
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse ->
                BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger in a collection, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to create a trigger in a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param trigger the trigger to create; must not be null.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the create response, or an error (invalid arguments surface as
 *         IllegalArgumentException via Mono.error).
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Build (and validate) the request BEFORE logging: previously the debug log
        // dereferenced trigger.getId() first, so a null trigger produced an accidental
        // NullPointerException instead of the IllegalArgumentException that
        // getTriggerRequest is designed to throw.
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds and validates the service request for a trigger operation under a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param trigger the trigger payload; must not be null.
 * @param options optional request options (may be null).
 * @param operationType the operation being performed (e.g. Create).
 * @return the populated service request.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, resourcePath,
        trigger, headers, options);
}
/**
 * Replaces an existing trigger, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to replace an existing trigger.
 *
 * @param trigger trigger with updated content; must not be null.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the replace response, or an error.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to delete a trigger.
 *
 * @param triggerLink link of the trigger to delete; must be non-empty.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the delete response, or an error.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to read a trigger.
 *
 * @param triggerLink link of the trigger to read; must be non-empty.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the triggers feed of a collection.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers with a raw SQL string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, state);
}
/**
 * Queries triggers with a parameterized {@link SqlQuerySpec}.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user defined function in a collection, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to create a user defined function in a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param udf the UDF to create; must not be null.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the create response, or an error (invalid arguments surface as
 *         IllegalArgumentException via Mono.error).
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Build (and validate) the request BEFORE logging: previously the debug log
        // dereferenced udf.getId() first, so a null udf produced an accidental
        // NullPointerException instead of the IllegalArgumentException that
        // getUserDefinedFunctionRequest is designed to throw.
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing user defined function, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to replace an existing user defined function.
 *
 * @param udf UDF with updated content; must not be null.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the replace response, or an error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user defined function, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to delete a user defined function.
 *
 * @param udfLink link of the UDF to delete; must be non-empty.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the delete response, or an error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String resourcePath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user defined function, retrying per the session-token reset policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the service call to read a user defined function.
 *
 * @param udfLink link of the UDF to read; must be non-empty.
 * @param options optional request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the read response, or an error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String resourcePath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user defined function feed of a collection.
 * @throws IllegalArgumentException when collectionLink is null or empty
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/** Queries user defined functions from raw query text by wrapping it in a SqlQuerySpec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, state);
}
/** Queries user defined functions of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads a conflict resource by link, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a conflict resource.
 * <p>
 * Fix: the request emitted by {@code addPartitionKeyInformation} ({@code req}) is now the one
 * handed to the retry policy and to {@code read}, instead of the stale outer {@code request}
 * reference — correct even if the enrichment helper ever returns a different instance.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);

        // Conflicts are partitioned resources; enrich the request with routing info first.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance)
                       .map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the conflict feed of a collection.
 * @throws IllegalArgumentException when collectionLink is null or empty
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
/** Queries conflicts from raw query text by wrapping it in a SqlQuerySpec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, state);
}
/** Queries conflicts of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(
    String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes a conflict resource by link, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a conflict resource.
 * <p>
 * Fix: the request emitted by {@code addPartitionKeyInformation} ({@code req}) is now the one
 * handed to the retry policy and to {@code delete}, instead of the stale outer {@code request}
 * reference — correct even if the enrichment helper ever returns a different instance.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);

        // Conflicts are partitioned resources; enrich the request with routing info first.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                       .map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a user in a database, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        final RxDocumentServiceRequest createRequest =
            getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(createRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user in a database, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a user.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        final RxDocumentServiceRequest upsertRequest =
            getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(upsertRequest);
        }
        return this.upsert(upsertRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles a service request targeting the users feed of a database.
 * @throws IllegalArgumentException when databaseLink is empty or user is null
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }

    RxDocumentClientImpl.validateResource(user);

    final String usersPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, usersPath, user, headers, options);
}
/**
 * Replaces a user, driving retries through a fresh session-token-reset retry policy
 * shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user, addressed via its self link.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);

        final String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }

        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user by link.
 * Consistency fix: annotated with {@code @Override} like every sibling CRUD entry point
 * (readUser, replaceUser, ...), so the compiler verifies the interface contract.
 */
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for a user.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);

        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }

        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user by link, driving retries through a fresh session-token-reset
 * retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);

        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }

        return this.read(readRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user feed of a database.
 * @throws IllegalArgumentException when databaseLink is null or empty
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedPath);
}
/** Queries users from raw query text by wrapping it in a SqlQuerySpec. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, state);
}
/** Queries users of a database with a parameterized query spec. */
@Override
public Flux<FeedResponse<User>> queryUsers(
    String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads a client encryption key by link, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a client encryption key.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);

        final String resourcePath = Utils.joinPath(clientEncryptionKeyLink, null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }

        return this.read(readRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a client encryption key in a database, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a client encryption key.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        final RxDocumentServiceRequest createRequest =
            getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(createRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles a service request targeting the client encryption key
 * feed of a database.
 * @throws IllegalArgumentException when databaseLink is empty or clientEncryptionKey is null
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }

    RxDocumentClientImpl.validateResource(clientEncryptionKey);

    final String keysPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.ClientEncryptionKey, keysPath, clientEncryptionKey, headers, options);
}
/**
 * Replaces a client encryption key addressed by its name-based link, driving retries
 * through a fresh session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a client encryption key, addressed by
 * its name-based link.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);

        final String resourcePath = Utils.joinPath(nameBasedLink, null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers,
            options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }

        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the client encryption key feed of a database.
 * @throws IllegalArgumentException when databaseLink is null or empty
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedPath);
}
/** Queries client encryption keys of a database with a parameterized query spec. */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a permission under a user.
 * <p>
 * Bug fix: the retry driver previously received a SECOND, freshly created retry policy
 * ({@code this.resetSessionTokenRetryPolicy.getRequestPolicy(null)}) while the operation lambda
 * used {@code documentClientRetryPolicy} — so retry decisions ran against a policy instance
 * that never observed the request. Both now share the same instance, matching every sibling
 * operation (upsertPermission, replacePermission, ...).
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Builds and issues the Create request for a permission.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        final RxDocumentServiceRequest createRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Create);
        return this.create(createRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a permission under a user, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a permission.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        final RxDocumentServiceRequest upsertRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(upsertRequest);
        }
        return this.upsert(upsertRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and assembles a service request targeting the permissions feed of a user.
 * @throws IllegalArgumentException when userLink is empty or permission is null
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }

    RxDocumentClientImpl.validateResource(permission);

    final String permissionsPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, permissionsPath, permission, headers, options);
}
/**
 * Replaces a permission, driving retries through a fresh session-token-reset
 * retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a permission, addressed via its self link.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);

        final String resourcePath = Utils.joinPath(permission.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, resourcePath, permission, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }

        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a permission by link, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a permission.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);

        final String resourcePath = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }

        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a permission by link, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a permission.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);

        final String resourcePath = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, resourcePath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }

        return this.read(readRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the permission feed of a user.
 * @throws IllegalArgumentException when userLink is null or empty
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String feedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, feedPath);
}
/** Queries permissions from raw query text by wrapping it in a SqlQuerySpec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, state);
}
/** Queries permissions of a user with a parameterized query spec. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(
    String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
/**
 * Replaces an offer (throughput resource), driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for an offer, addressed via its self link.
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);

        final String resourcePath = Utils.joinPath(offer.getSelfLink(), null);
        // Offer operations carry no custom headers and no request options.
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, resourcePath, offer, null, null);

        return this.replace(replaceRequest, documentClientRetryPolicy)
                   .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads an offer by link, driving retries through a fresh session-token-reset
 * retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for an offer.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);

        final String resourcePath = Utils.joinPath(offerLink, null);
        // Offer reads carry no custom headers and no request options; the cast picks the
        // header-map overload of create.
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, resourcePath, (HashMap<String, String>) null, null);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }

        return this.read(readRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the account-level offers feed. */
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    final String feedPath = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, feedPath);
}
/**
 * Feed-read overload that unwraps the query options from the operation state and
 * delegates to the options-based overload.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final CosmosQueryRequestOptions queryOptions = state.getQueryOptions();
    return nonDocumentReadFeed(queryOptions, resourceType, klass, resourceLink);
}
// Entry point for paged reads of non-document feeds (offers, users, permissions, ...).
// A single retry policy instance drives both the inline attempt and any replays.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.fluxInlineIfPossibleAsObs(
() -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
retryPolicy);
}
// Core pagination loop for non-document feed reads: builds one ReadFeed request per page,
// threading the continuation token through request headers, and maps each raw response to a
// typed FeedResponse page. Document feeds must not come through here (asserted below).
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
// Normalize null options so downstream accessors never NPE.
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 means "let the service choose the page size".
int maxPageSize = maxItemCount != null ? maxItemCount : -1;

assert(resourceType != ResourceType.Document);

// Per-page request factory: continuation token (when present) and page size travel as headers.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
// Notify the retry policy before every page request, not just the first.
retryPolicy.onBeforeSendRequest(request);
return request;
};

// Executes a page request and converts the wire response into a typed feed page, honoring
// any custom item factory configured on the query options.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));

// The paginator drives createRequestFunc/executeFunc until the continuation is exhausted.
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
/** Queries offers from raw query text by wrapping it in a SqlQuerySpec. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, state);
}
// Queries account-level offers; offers have no parent link, hence the null resource link.
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
/**
 * Reads the database account metadata, driving retries through a fresh
 * session-token-reset retry policy shared by the attempt and the retry driver.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for the database account metadata.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // Account metadata lives at the service root, so the resource address is empty
        // and no headers or options apply.
        final RxDocumentServiceRequest accountRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(accountRequest, documentClientRetryPolicy)
                   .map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Exposes the client's session container (session-consistency token store) as an opaque Object.
public Object getSession() {
return this.sessionContainer;
}
// Replaces the client's session container. The argument must be a SessionContainer;
// anything else fails here with a ClassCastException.
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
// Accessor for the client-side collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
/** Accessor for the partition-key-range routing cache. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return this.partitionKeyRangeCache;
}
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
@Override
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the DatabaseAccount resource from one specific endpoint (used e.g. to probe regional endpoints),
// and refreshes the multi-write flag from the returned account metadata.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Force the request to the supplied endpoint rather than the resolved regional endpoint.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
// Multi-write is usable only when both the client policy and the account enable it.
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Picks the transport used to execute a request. Certain requests must be routed
 * through gateway even when the client connectivity mode is direct.
 *
 * @param request the request being dispatched
 * @return the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request gateway override always wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Resource types that are gateway-only (scripts go direct only for ExecuteJavaScript).
    if (resourceType == ResourceType.Offer
        || resourceType == ResourceType.ClientEncryptionKey
        || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
        || resourceType == ResourceType.PartitionKeyRange
        || (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete)) {
        return this.gatewayProxy;
    }
    switch (operationType) {
        case Create:
        case Upsert:
            // Control-plane creates go through gateway; data-plane writes go direct.
            if (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection
                || resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Delete:
            if (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            }
            return this.storeModel;
        case Replace:
            return resourceType == ResourceType.DocumentCollection ? this.gatewayProxy : this.storeModel;
        case Read:
            return resourceType == ResourceType.DocumentCollection ? this.gatewayProxy : this.storeModel;
        default:
            // Queries/read-feeds over collection children without a pinned partition
            // (no partition-key-range identity and no partition-key header) fan out via gateway.
            if ((operationType == OperationType.Query
                    || operationType == OperationType.SqlQuery
                    || operationType == OperationType.ReadFeed)
                && Utils.isCollectionChild(request.getResourceType())) {
                if (request.getPartitionKeyRangeIdentity() == null
                    && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                    return this.gatewayProxy;
                }
            }
            return this.storeModel;
    }
}
/**
 * Closes this client exactly once: shuts down the endpoint manager, store client factory,
 * HTTP client, CPU monitor registration and (if enabled) the throughput control store.
 * Subsequent calls only log a warning.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        if (this.throughputControlEnabled.get()) {
            logger.info("Closing ThroughputControlStore ...");
            // Safe close: the enabled flag and the store assignment are separate writes,
            // so guard against a store that was never (fully) initialized.
            if (this.throughputControlStore != null) {
                this.throughputControlStore.close();
            }
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
// Lazily creates the ThroughputControlStore on first use (guarded by compareAndSet) and wires it
// into the direct store model or the gateway proxy depending on the connection mode, then
// registers the group. Method is synchronized, so registration is serialized.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Delegates proactive connection warm-up / cache initialization to the store model.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * In DIRECT mode the injector is wired into both the store model and the address resolver;
 * the gateway proxy is always configured regardless of connection mode.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Bookkeeping hooks for proactive connection warm-up; both delegate to the store model.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
/**
 * Builds the parameterized query used to scan a single logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 *
 * @param partitionKey the partition key whose value becomes the query parameter
 * @param partitionKeySelector the property path selector appended after the root alias {@code c}
 * @return the query spec with the partition key bound as {@code @pkValue}
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
// Returns the feed ranges (one per physical partition) for the given collection link.
// Wrapped in InvalidPartitionExceptionRetryPolicy so a stale name cache triggers a refresh + retry.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection, then maps its full set of overlapping partition key ranges to feed ranges.
// Throws IllegalArgumentException synchronously when collectionLink is empty.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// Query all ranges covering the entire partition key space (forceRefresh = true).
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts resolved partition key ranges into feed ranges. A null range list means the
 * name cache is stale: the request is flagged for a name-cache refresh and an
 * {@link InvalidPartitionException} is thrown so the retry policy can re-resolve.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> partitionKeyRanges = partitionKeyRangeListValueHolder.v;
    if (partitionKeyRanges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    final List<FeedRange> feedRanges = new ArrayList<>(partitionKeyRanges.size());
    for (PartitionKeyRange pkRange : partitionKeyRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a physical partition's range as an effective-partition-key (EPK) feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom rnd = ThreadLocalRandom.current();
    return randomUuid(rnd.nextLong(), rnd.nextLong());
}
// Stamps the RFC 4122 version-4 and IETF-variant bits onto the supplied random bits.
static UUID randomUuid(long msb, long lsb) {
    // Clear the version nibble (bits 12-15 of the msb's low word) and set version 4.
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Clear the two top variant bits and set the IETF variant (binary 10).
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: uses this client as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
// Executes a document point operation with the threshold-based availability strategy:
// the operation is started in the primary region and, after increasing delays, hedged
// into additional applicable regions; the first non-transient result wins.
// Falls back to a plain single-region call when fewer than two regions apply.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Hedging only makes sense with at least two candidate regions.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
// Scoped factory collects diagnostics across all hedged attempts so they can be merged once.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First mono: the normal cross-region operation; any CosmosException is treated as a
// candidate result (its own retry policy already ran).
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged mono: pin it to a single region by excluding all other applicable regions,
// and only accept non-transient errors as final results.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger hedged attempts: threshold + (n-1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First mono emitting a value wins; losers are cancelled. Diagnostics are merged on every exit path.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when all sources fail; unwrap the first
// CosmosException among the per-region causes and surface it.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the (unwrapped) throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
return unwrappedException instanceof CosmosException;
}
// True when the (unwrapped) throwable is a CosmosException whose status/sub-status marks it
// as non-transient for hedging purposes (retrying in another region cannot help).
private static boolean isNonTransientCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
if (!(unwrappedException instanceof CosmosException)) {
return false;
}
CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
return isNonTransientResultForHedging(
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode());
}
/**
 * Computes the exclusion list that pins a hedged attempt to {@code currentRegion}:
 * the caller's original exclusions plus every other applicable region.
 */
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    List<String> effectiveExcludedRegions =
        initialExcludedRegions != null ? new ArrayList<>(initialExcludedRegions) : new ArrayList<>();
    applicableRegions
        .stream()
        .filter(applicableRegion -> !applicableRegion.equals(currentRegion))
        .forEach(effectiveExcludedRegions::add);
    return effectiveExcludedRegions;
}
/**
 * Decides whether a status/sub-status pair is final for hedging purposes —
 * i.e. retrying the operation in another region cannot change the outcome.
 */
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Any success (below 400) is final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // A client-side operation timeout will not behave differently elsewhere.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
        && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
        return true;
    }
    // Deterministic client errors.
    switch (statusCode) {
        case HttpConstants.StatusCodes.BADREQUEST:
        case HttpConstants.StatusCodes.CONFLICT:
        case HttpConstants.StatusCodes.METHOD_NOT_ALLOWED:
        case HttpConstants.StatusCodes.PRECONDITION_FAILED:
        case HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE:
        case HttpConstants.StatusCodes.UNAUTHORIZED:
            return true;
        default:
            break;
    }
    // A plain 404 (no sub status) is a definitive "not found"; everything else is transient.
    return statusCode == HttpConstants.StatusCodes.NOTFOUND
        && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
}
// Returns the override when supplied, otherwise this client acts as the diagnostics context.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any.
 * @param operationType the operation type (read-only vs. write determines the endpoint set)
 * @param excludedRegions regions to exclude from the returned endpoints; may be null
 * @return the applicable endpoints ordered by preference list, or an empty list when the
 *         operation type is neither read-only nor write
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
/**
 * Strips null entries from the endpoint list in place and returns the same (mutated) list;
 * a null list yields the shared empty list.
 * Uses removeIf instead of the previous index-based remove loop (which was O(n^2) on ArrayList).
 *
 * @param orderedEffectiveEndpointsList the list to clean; may be null
 * @return the cleaned list, or EMPTY_ENDPOINT_LIST when the input is null
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    orderedEffectiveEndpointsList.removeIf(uri -> uri == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload: extracts the excluded regions from the request options.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
// Determines the ordered region names eligible for hedged (speculative) execution.
// Returns the shared empty list when hedging does not apply: policy disabled, non-document
// resource, non-idempotent write, single-write-region account, or a non-threshold strategy.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Case-insensitive exclusion match.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
endpoints.forEach(uri -> {
// NOTE(review): assumes getRegionName never returns null for an applicable endpoint — confirm;
// a null region name would NPE on toLowerCase below.
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
// Feed-operation counterpart of wrapPointOperationWithAvailabilityStrategy: starts the operation
// in the primary region and hedges into additional regions after staggered delays; the first
// non-transient result wins. Falls back to a single call when fewer than two regions apply.
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Hedging only makes sense with at least two candidate regions.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
// Each attempt operates on its own clone of the request.
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// First mono: normal cross-region operation; any CosmosException is a candidate result.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged mono: pin it to one region via exclusions; only non-transient errors are final.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger hedged attempts: threshold + (n-1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First mono emitting a value wins; losers are cancelled.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when all sources fail; surface the first
// CosmosException among the per-region causes.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
// Callback shape for a document point operation executed (possibly multiple times, hedged)
// by wrapPointOperationWithAvailabilityStrategy.
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Either-style holder for a hedged point operation outcome: exactly one of
 * {@code response} / {@code exception} is non-null, fixed at construction time.
 */
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;
    // Failure outcome.
    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }
    // Success outcome.
    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }
    public boolean isError() {
        // Invariant: exactly one field is non-null, so this is equivalent to exception != null.
        return this.response == null;
    }
    public CosmosException getException() {
        return this.exception;
    }
    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
/**
 * Either-style holder for a hedged feed operation outcome: exactly one of
 * {@code response} / {@code exception} is non-null, fixed at construction time.
 */
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;
    // Failure outcome.
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }
    // Success outcome.
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }
    public boolean isError() {
        // Invariant: exactly one field is non-null, so this is equivalent to exception != null.
        return this.response == null;
    }
    public CosmosException getException() {
        return this.exception;
    }
    public T getResponse() {
        return this.response;
    }
}
/**
 * Diagnostics factory scoped to one (possibly hedged) operation: records every
 * CosmosDiagnostics created through it and merges them exactly once into a single
 * CosmosDiagnosticsContext when the operation settles.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Guards against merging the collected diagnostics more than once.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;
    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }
    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }
    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Track every diagnostics instance created in this scope so it can be merged later.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        return diagnostics;
    }
    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }
    // Merges using the diagnostics-context snapshot from the request options when available.
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            // Reuse the fetched snapshot instead of invoking the getter a second time.
            CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
            if (ctxSnapshot != null) {
                knownCtx = ctxSnapshot;
            }
        }
        merge(knownCtx);
    }
    // Attaches all collected, context-less, non-empty diagnostics to the given context
    // (or to the first context found among the collected diagnostics). Idempotent.
    public void merge(CosmosDiagnosticsContext knownCtx) {
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            return;
        }
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as captured so the paged flux does not drop them.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }
    // Clears collected diagnostics and re-arms the merge guard for reuse.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} |
Fixed in the new version. | public void testCreateComponent() {
// Creates a Log Analytics workspace inline, then an Application Insights component bound to it,
// verifies the component round-trips through get/list, and deletes it in the finally block.
ApplicationInsightsComponent component = null;
String randomPadding = randomPadding();
try {
String componentName = "component" + randomPadding;
String spaceName = "space" + randomPadding;
component = applicationInsightsManager.components()
.define(componentName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withKind("web")
.withApplicationType(ApplicationType.WEB)
// Workspace is created inline purely to obtain its resource id.
.withWorkspaceResourceId(
logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new WorkspaceSku().withName(WorkspaceSkuNameEnum.PER_GB2018))
.withFeatures(new WorkspaceFeatures().withEnableLogAccessUsingOnlyResourcePermissions(true))
.withWorkspaceCapping(new WorkspaceCapping().withDailyQuotaGb(-1D)) // -1 = no daily cap
.withRetentionInDays(30)
.withPublicNetworkAccessForIngestion(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.withPublicNetworkAccessForQuery(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.create()
.id()
)
.withIngestionMode(IngestionMode.LOG_ANALYTICS)
.withPublicNetworkAccessForIngestion(PublicNetworkAccessType.ENABLED)
.withPublicNetworkAccessForQuery(PublicNetworkAccessType.ENABLED)
.create();
component.refresh();
Assertions.assertEquals(component.name(), componentName);
Assertions.assertEquals(component.name(), applicationInsightsManager.components().getById(component.id()).name());
Assertions.assertTrue(applicationInsightsManager.components().list().stream().findAny().isPresent());
} finally {
// Best-effort cleanup of the created component.
if (component != null) {
applicationInsightsManager.components().deleteById(component.id());
}
}
} | .id() | public void testCreateComponent() {
// Same scenario with the workspace creation extracted into a local variable
// (avoids the deeply nested inline builder of the earlier variant).
ApplicationInsightsComponent component = null;
String randomPadding = randomPadding();
try {
String componentName = "component" + randomPadding;
String spaceName = "space" + randomPadding;
// Create the Log Analytics workspace first; only its id is needed below.
Workspace workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.create();
component = applicationInsightsManager.components()
.define(componentName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withKind("web")
.withApplicationType(ApplicationType.WEB)
.withWorkspaceResourceId(workspace.id())
.withIngestionMode(IngestionMode.LOG_ANALYTICS)
.create();
component.refresh();
Assertions.assertEquals(component.name(), componentName);
Assertions.assertEquals(component.name(), applicationInsightsManager.components().getById(component.id()).name());
Assertions.assertTrue(applicationInsightsManager.components().list().stream().findAny().isPresent());
} finally {
// Best-effort cleanup of the created component.
if (component != null) {
applicationInsightsManager.components().deleteById(component.id());
}
}
} | class ApplicationInsightsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
// Generated per run; overridden by AZURE_RESOURCE_GROUP_NAME when set (see beforeTest).
private String resourceGroupName = "rg" + randomPadding();
private ApplicationInsightsManager applicationInsightsManager;
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
// True when a pre-existing resource group was supplied via environment configuration.
private boolean testEnv;
// Authenticates the three managers with default Azure credentials and either reuses the
// resource group named by AZURE_RESOURCE_GROUP_NAME or creates a fresh one for this run.
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureeEnvironment.AZURE);
applicationInsightsManager = ApplicationInsightsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ApplicationInsightsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ApplicationInsightsManager applicationInsightsManager;
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
applicationInsightsManager = ApplicationInsightsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
test here others are just refactor | public void canCRUDProbes() {
String appGatewayName = generateRandomResourceName("agw", 15);
String probeName = "probe1";
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway = networkManager.applicationGateways().define(appGatewayName)
.withRegion(REGION)
.withNewResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.defineProbe(probeName)
.withHostNameFromBackendHttpSettings()
.withPath("/")
.withHttp()
.withTimeoutInSeconds(10)
.withTimeBetweenProbesInSeconds(9)
.withRetriesBeforeUnhealthy(5)
.withHealthyHttpResponseStatusCodeRange(200, 249)
.attach()
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(1, appGateway.probes().size());
Assertions.assertNull(appGateway.probes().get(probeName).host());
Assertions.assertTrue(appGateway.probes().get(probeName).isHostNameFromBackendHttpSettings());
appGateway.update()
.updateProbe(probeName)
.withoutHostNameFromBackendHttpSettings()
.withHost("microsoft.com")
.parent()
.apply();
Assertions.assertEquals(1, appGateway.probes().size());
Assertions.assertNotNull(appGateway.probes().get(probeName).host());
Assertions.assertFalse(appGateway.probes().get(probeName).isHostNameFromBackendHttpSettings());
appGateway.update()
.withoutProbe(probeName)
.apply();
Assertions.assertTrue(appGateway.probes().isEmpty());
} | } | public void canCRUDProbes() {
String appGatewayName = generateRandomResourceName("agw", 15);
String probeName = "probe1";
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway = networkManager.applicationGateways().define(appGatewayName)
.withRegion(REGION)
.withNewResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.defineProbe(probeName)
.withHostNameFromBackendHttpSettings()
.withPath("/")
.withHttp()
.withTimeoutInSeconds(10)
.withTimeBetweenProbesInSeconds(9)
.withRetriesBeforeUnhealthy(5)
.withHealthyHttpResponseStatusCodeRange(200, 249)
.attach()
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(1, appGateway.probes().size());
Assertions.assertNull(appGateway.probes().get(probeName).host());
Assertions.assertTrue(appGateway.probes().get(probeName).isHostNameFromBackendHttpSettings());
appGateway.update()
.updateProbe(probeName)
.withoutHostNameFromBackendHttpSettings()
.withHost("microsoft.com")
.parent()
.apply();
Assertions.assertEquals(1, appGateway.probes().size());
Assertions.assertNotNull(appGateway.probes().get(probeName).host());
Assertions.assertFalse(appGateway.probes().get(probeName).isHostNameFromBackendHttpSettings());
appGateway.update()
.withoutProbe(probeName)
.apply();
Assertions.assertTrue(appGateway.probes().isEmpty());
} | class ApplicationGatewayTests extends NetworkManagementTest {
private static final Region REGION = Region.US_EAST;
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertNotNull(appGateway);
Assertions.assertEquals(ApplicationGatewayTier.WAF_V2, appGateway.tier());
Assertions.assertEquals(ApplicationGatewaySkuName.WAF_V2, appGateway.size());
Assertions.assertEquals(2, appGateway.autoscaleConfiguration().minCapacity());
Assertions.assertEquals(5, (int) appGateway.autoscaleConfiguration().maxCapacity());
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertEquals(200, (int) appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb());
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions.assertEquals(64, (int) appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb());
Assertions.assertEquals(1, appGateway.webApplicationFirewallConfiguration().exclusions().size());
Assertions.assertEquals(
"RequestHeaderNames",
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable());
Assertions.assertEquals(
"StartsWith",
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator());
Assertions.assertEquals(
"User-Agent",
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector());
Assertions.assertEquals(1, appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size());
Assertions.assertEquals(
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION",
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName());
}
@Test
public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
String listener1 = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listener1)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listener1).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listener1)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listener1).hostname());
List<String> hostnames = new ArrayList<>();
hostnames.add(hostname1);
hostnames.add(hostname2);
gateway.update()
.updateListener(listener1)
.withHostnames(hostnames)
.parent()
.apply();
Assertions.assertEquals(hostnames, gateway.listeners().get(listener1).hostnames());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
@Test
public void canAutoAssignPriorityForRequestRoutingRulesWithWAF() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.defineRequestRoutingRule("rule2")
.fromPublicFrontend()
.fromFrontendHttpPort(81)
.toBackendHttpPort(8181)
.toBackendIPAddress("11.1.1.3")
.attach()
.defineRequestRoutingRule("rule3")
.fromPublicFrontend()
.fromFrontendHttpPort(83)
.toBackendHttpPort(8383)
.toBackendIPAddress("11.1.1.4")
.withPriority(1)
.attach()
.defineRequestRoutingRule("rule4")
.fromPublicFrontend()
.fromFrontendHttpPort(84)
.toBackendHttpPort(8384)
.toBackendIPAddress("11.1.1.5")
.withPriority(20000)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
appGateway.update()
.defineRequestRoutingRule("rule5")
.fromPublicFrontend()
.fromFrontendHttpPort(82)
.toBackendHttpPort(8282)
.toBackendIPAddress("11.1.1.6")
.attach()
.apply();
Integer rule1Priority = appGateway.requestRoutingRules().get("rule1").priority();
Integer rule2Priority = appGateway.requestRoutingRules().get("rule2").priority();
Integer rule5Priority = appGateway.requestRoutingRules().get("rule5").priority();
Assertions.assertTrue(rule1Priority < rule5Priority && rule2Priority < rule5Priority);
Assertions.assertEquals(1, appGateway.requestRoutingRules().get("rule3").priority());
Assertions.assertEquals(20000, appGateway.requestRoutingRules().get("rule4").priority());
appGateway.update()
.defineRequestRoutingRule("rule6")
.fromPublicFrontend()
.fromFrontendHttpPort(85)
.toBackendHttpPort(8585)
.toBackendIPAddress("11.1.1.7")
.attach()
.defineRequestRoutingRule("rule7")
.fromPublicFrontend()
.fromFrontendHttpPort(86)
.toBackendHttpPort(8686)
.toBackendIPAddress("11.1.1.8")
.withPriority(10040)
.attach()
.apply();
Assertions.assertEquals(10050, appGateway.requestRoutingRules().get("rule6").priority());
appGateway.update()
.updateRequestRoutingRule("rule3")
.withPriority(2)
.parent()
.apply();
Assertions.assertEquals(2, appGateway.requestRoutingRules().get("rule3").priority());
}
@Test
public void testAddRemoveIpAddressFromWafV2WithExclusionsEqualsAny() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withNewResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(
new ApplicationGatewayWebApplicationFirewallConfiguration()
.withEnabled(true)
.withFirewallMode(ApplicationGatewayFirewallMode.PREVENTION)
.withRuleSetType("OWASP")
.withRuleSetVersion("3.0")
.withExclusions(Collections.singletonList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator(null)
.withSelector(null)
))
)
.create();
Assertions.assertEquals("RequestHeaderNames", appGateway.webApplicationFirewallConfiguration().exclusions().iterator().next().matchVariable());
Assertions.assertNull(appGateway.webApplicationFirewallConfiguration().exclusions().iterator().next().selectorMatchOperator());
Map<String, ApplicationGatewayBackend> backends = appGateway.backends();
backends.forEach((name, backend) ->
backend.addresses().forEach(addr ->
appGateway.update()
.updateBackend(name)
.withoutIPAddress(addr.ipAddress())
.parent()
.apply()));
}
@Test
public void canAssociateWafPolicy() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String wafPolicyName = generateRandomResourceName("waf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
WebApplicationFirewallPolicy wafPolicy =
networkManager
.webApplicationFirewallPolicies()
.define(wafPolicyName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.withManagedRuleSet(KnownWebApplicationGatewayManagedRuleSet.OWASP_3_2)
.create();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withExistingWebApplicationFirewallPolicy(wafPolicy)
.create();
Assertions.assertNotNull(appGateway.getWebApplicationFirewallPolicy());
Assertions.assertNull(appGateway.webApplicationFirewallConfiguration());
wafPolicy.refresh();
Assertions.assertEquals(appGateway.id(), wafPolicy.getAssociatedApplicationGateways().iterator().next().id());
Assertions.assertEquals(wafPolicy.id(), appGateway.getWebApplicationFirewallPolicy().id());
appGateway.update()
.withNewWebApplicationFirewallPolicy(WebApplicationFirewallMode.PREVENTION)
.apply();
WebApplicationFirewallPolicy newPolicy = appGateway.getWebApplicationFirewallPolicy();
Assertions.assertNotNull(newPolicy);
Assertions.assertTrue(newPolicy.isEnabled());
Assertions.assertEquals(WebApplicationFirewallMode.PREVENTION, newPolicy.mode());
Assertions.assertNotEquals(newPolicy.id(), wafPolicy.id());
Assertions.assertEquals(appGateway.id(), newPolicy.getAssociatedApplicationGateways().iterator().next().id());
Assertions.assertEquals(newPolicy.id(), appGateway.getWebApplicationFirewallPolicy().id());
String invalidPolicyName = "invalid";
Assertions.assertThrows(IllegalStateException.class, () -> {
networkManager.applicationGateways()
.define("invalid")
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withNewPublicIpAddress()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withNewWebApplicationFirewallPolicy(
networkManager
.webApplicationFirewallPolicies()
.define(invalidPolicyName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.withManagedRuleSet(KnownWebApplicationGatewayManagedRuleSet.OWASP_3_2))
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
});
Assertions.assertTrue(
networkManager
.webApplicationFirewallPolicies()
.listByResourceGroup(rgName)
.stream()
.noneMatch(policy -> policy.name().equals(invalidPolicyName)));
}
@Test
public void canSetSslPolicy() {
String appGatewayName = generateRandomResourceName("agw", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withPredefinedSslPolicy(ApplicationGatewaySslPolicyName.APP_GW_SSL_POLICY20150501)
.create();
ApplicationGatewaySslPolicy sslPolicy = appGateway.sslPolicy();
Assertions.assertNotNull(sslPolicy);
Assertions.assertEquals(ApplicationGatewaySslPolicyType.PREDEFINED, sslPolicy.policyType());
Assertions.assertEquals(ApplicationGatewaySslPolicyName.APP_GW_SSL_POLICY20150501, sslPolicy.policyName());
appGateway.update()
.withCustomV2SslPolicy(ApplicationGatewaySslProtocol.TLSV1_2, Collections.singletonList(ApplicationGatewaySslCipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256))
.apply();
sslPolicy = appGateway.sslPolicy();
Assertions.assertNotNull(sslPolicy);
Assertions.assertEquals(ApplicationGatewaySslPolicyType.CUSTOM_V2, sslPolicy.policyType());
Assertions.assertNull(sslPolicy.policyName());
Assertions.assertEquals(ApplicationGatewaySslProtocol.TLSV1_2, sslPolicy.minProtocolVersion());
Assertions.assertTrue(sslPolicy.cipherSuites().contains(ApplicationGatewaySslCipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256));
Assertions.assertThrows(ManagementException.class, () -> {
appGateway.update()
.withSslPolicy(new ApplicationGatewaySslPolicy()
.withPolicyType(ApplicationGatewaySslPolicyType.PREDEFINED)
.withPolicyName(ApplicationGatewaySslPolicyName.APP_GW_SSL_POLICY20150501)
.withMinProtocolVersion(ApplicationGatewaySslProtocol.TLSV1_1))
.apply();
});
}
@Test
public void canCreateApplicationGatewayWithDefaultSku() {
String appGatewayName = generateRandomResourceName("agw", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withNewResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(ApplicationGatewayTier.BASIC, appGateway.tier());
Assertions.assertNotNull(appGateway.requestRoutingRules().get("rule1").priority());
}
@Test
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.allowCertificateAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
CertificateClient certificateClient = new CertificateClientBuilder()
.vaultUrl(vault.vaultUri())
.pipeline(vault.vaultHttpPipeline())
.buildClient();
KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
return certificate.getSecretId();
}
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
String vaultName = generateRandomResourceName("vlt", 10);
String secretName = generateRandomResourceName("srt", 10);
BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
.getResource("test.certificate").getFile())));
String secretValue = buff.readLine();
Vault vault =
keyVaultManager
.vaults()
.define(vaultName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineAccessPolicy()
.forServicePrincipal(servicePrincipal)
.allowSecretAllPermissions()
.attach()
.defineAccessPolicy()
.forObjectId(identityPrincipal)
.allowSecretAllPermissions()
.attach()
.withAccessFromAzureServices()
.withDeploymentEnabled()
.create();
return vault.secrets().define(secretName).withValue(secretValue).create();
}
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
ObjectMapper mapper = new ObjectMapper();
JsonNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
((ObjectNode) userAssignedIdentitiesValueObject).put("principalId", identity.principalId());
((ObjectNode) userAssignedIdentitiesValueObject).put("clientId", identity.clientId());
ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
new JacksonAdapter()
.deserialize(
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
ManagedServiceIdentityUserAssignedIdentities.class,
SerializerEncoding.JSON);
Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
return serviceIdentity;
}
private PublicIpAddress createResourceGroupAndPublicIpAddress() {
String appPublicIp = generateRandomResourceName("pip", 15);
return networkManager
.publicIpAddresses()
.define(appPublicIp)
.withRegion(REGION)
.withNewResourceGroup(rgName)
.withSku(PublicIPSkuType.STANDARD)
.withStaticIP()
.create();
}
} | class ApplicationGatewayTests extends NetworkManagementTest {
private static final Region REGION = Region.US_EAST;
@Test
public void canCRUDApplicationGatewayWithWAF() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertNotNull(appGateway);
Assertions.assertEquals(ApplicationGatewayTier.WAF_V2, appGateway.tier());
Assertions.assertEquals(ApplicationGatewaySkuName.WAF_V2, appGateway.size());
Assertions.assertEquals(2, appGateway.autoscaleConfiguration().minCapacity());
Assertions.assertEquals(5, (int) appGateway.autoscaleConfiguration().maxCapacity());
ApplicationGatewayWebApplicationFirewallConfiguration config = appGateway.webApplicationFirewallConfiguration();
config.withFileUploadLimitInMb(200);
config
.withDisabledRuleGroups(
Arrays
.asList(
new ApplicationGatewayFirewallDisabledRuleGroup()
.withRuleGroupName("REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION")));
config.withRequestBodyCheck(true);
config.withMaxRequestBodySizeInKb(64);
config
.withExclusions(
Arrays
.asList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator("StartsWith")
.withSelector("User-Agent")));
appGateway.update().withWebApplicationFirewall(config).apply();
appGateway.refresh();
Assertions.assertEquals(200, (int) appGateway.webApplicationFirewallConfiguration().fileUploadLimitInMb());
Assertions.assertTrue(appGateway.webApplicationFirewallConfiguration().requestBodyCheck());
Assertions.assertEquals(64, (int) appGateway.webApplicationFirewallConfiguration().maxRequestBodySizeInKb());
Assertions.assertEquals(1, appGateway.webApplicationFirewallConfiguration().exclusions().size());
Assertions.assertEquals(
"RequestHeaderNames",
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).matchVariable());
Assertions.assertEquals(
"StartsWith",
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selectorMatchOperator());
Assertions.assertEquals(
"User-Agent",
appGateway.webApplicationFirewallConfiguration().exclusions().get(0).selector());
Assertions.assertEquals(1, appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().size());
Assertions.assertEquals(
"REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION",
appGateway.webApplicationFirewallConfiguration().disabledRuleGroups().get(0).ruleGroupName());
}
@Test
public void canSpecifyWildcardListeners() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
String listener1 = "listener1";
String hostname1 = "my.contoso.com";
ApplicationGateway gateway = networkManager.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule80")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.withCookieBasedAffinity()
.attach()
.defineListener(listener1)
.withPublicFrontend()
.withFrontendPort(9000)
.withHttp()
.withHostname(hostname1)
.attach()
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(hostname1, gateway.listeners().get(listener1).hostname());
String hostname2 = "*.contoso.com";
gateway.update()
.updateListener(listener1)
.withHostname(hostname2)
.parent()
.apply();
Assertions.assertEquals(hostname2, gateway.listeners().get(listener1).hostname());
List<String> hostnames = new ArrayList<>();
hostnames.add(hostname1);
hostnames.add(hostname2);
gateway.update()
.updateListener(listener1)
.withHostnames(hostnames)
.parent()
.apply();
Assertions.assertEquals(hostnames, gateway.listeners().get(listener1).hostnames());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSecret() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
Secret secret1 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
Secret secret2 = createKeyVaultSecret(clientIdFromFile(), identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secret1.id())
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
Assertions.assertEquals(secret1.id(), appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions
.assertEquals(
secret1.id(), appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
appGateway =
appGateway.update().defineSslCertificate("ssl2").withKeyVaultSecretId(secret2.id()).attach().apply();
Assertions.assertEquals(secret2.id(), appGateway.sslCertificates().get("ssl2").keyVaultSecretId());
}
// Same scenario as canCreateApplicationGatewayWithSecret, but the secret id comes from a
// Key Vault *certificate* (the certificate's backing secret) rather than a plain secret.
@Test
@DoNotRecord(skipInPlayback = true)
public void canCreateApplicationGatewayWithSslCertificate() throws Exception {
String appGatewayName = generateRandomResourceName("agwaf", 15);
String identityName = generateRandomResourceName("id", 10);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
Identity identity =
msiManager
.identities()
.define(identityName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.create();
Assertions.assertNotNull(identity.name());
Assertions.assertNotNull(identity.principalId());
ManagedServiceIdentity serviceIdentity = createManagedServiceIdentityFromIdentity(identity);
// Secret id of a Key Vault certificate created for this identity.
String secretId = createKeyVaultCertificate(clientIdFromFile(), identity.principalId());
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpsPort(443)
.withSslCertificate("ssl1")
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.withIdentity(serviceIdentity)
.defineSslCertificate("ssl1")
.withKeyVaultSecretId(secretId)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
// Both the certificate entry and the routing rule must point at the vault secret.
Assertions.assertEquals(secretId, appGateway.sslCertificates().get("ssl1").keyVaultSecretId());
Assertions.assertEquals(secretId, appGateway.requestRoutingRules().get("rule1").sslCertificate().keyVaultSecretId());
}
// Verifies priority handling for request routing rules on a WAF_V2 gateway:
// rules without an explicit priority get one auto-assigned; explicit priorities
// (including boundary value 20000) are honored; auto-assigned priorities of rules
// added later sort after earlier ones; and priorities can be updated in place.
@Test
public void canAutoAssignPriorityForRequestRoutingRulesWithWAF() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
// rule1/rule2: no priority (auto-assign); rule3: explicit 1; rule4: explicit max 20000.
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.toBackendIPAddress("11.1.1.2")
.attach()
.defineRequestRoutingRule("rule2")
.fromPublicFrontend()
.fromFrontendHttpPort(81)
.toBackendHttpPort(8181)
.toBackendIPAddress("11.1.1.3")
.attach()
.defineRequestRoutingRule("rule3")
.fromPublicFrontend()
.fromFrontendHttpPort(83)
.toBackendHttpPort(8383)
.toBackendIPAddress("11.1.1.4")
.withPriority(1)
.attach()
.defineRequestRoutingRule("rule4")
.fromPublicFrontend()
.fromFrontendHttpPort(84)
.toBackendHttpPort(8384)
.toBackendIPAddress("11.1.1.5")
.withPriority(20000)
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
.create();
// rule5 added via update, again without an explicit priority.
appGateway.update()
.defineRequestRoutingRule("rule5")
.fromPublicFrontend()
.fromFrontendHttpPort(82)
.toBackendHttpPort(8282)
.toBackendIPAddress("11.1.1.6")
.attach()
.apply();
Integer rule1Priority = appGateway.requestRoutingRules().get("rule1").priority();
Integer rule2Priority = appGateway.requestRoutingRules().get("rule2").priority();
Integer rule5Priority = appGateway.requestRoutingRules().get("rule5").priority();
// A later auto-assigned priority must sort after earlier auto-assigned ones.
Assertions.assertTrue(rule1Priority < rule5Priority && rule2Priority < rule5Priority);
Assertions.assertEquals(1, appGateway.requestRoutingRules().get("rule3").priority());
Assertions.assertEquals(20000, appGateway.requestRoutingRules().get("rule4").priority());
// Mixing an auto-assigned rule (rule6) and a high explicit priority (rule7 = 10040):
// the auto-assigned value is expected to land above the highest explicit one (10050).
appGateway.update()
.defineRequestRoutingRule("rule6")
.fromPublicFrontend()
.fromFrontendHttpPort(85)
.toBackendHttpPort(8585)
.toBackendIPAddress("11.1.1.7")
.attach()
.defineRequestRoutingRule("rule7")
.fromPublicFrontend()
.fromFrontendHttpPort(86)
.toBackendHttpPort(8686)
.toBackendIPAddress("11.1.1.8")
.withPriority(10040)
.attach()
.apply();
Assertions.assertEquals(10050, appGateway.requestRoutingRules().get("rule6").priority());
// An existing rule's priority can be changed in an update.
appGateway.update()
.updateRequestRoutingRule("rule3")
.withPriority(2)
.parent()
.apply();
Assertions.assertEquals(2, appGateway.requestRoutingRules().get("rule3").priority());
}
// Verifies that a WAF_V2 gateway accepts a firewall exclusion whose operator/selector are
// null (i.e. "match any"), and that backend IP addresses can subsequently be removed
// without the null exclusion fields causing the update to fail.
@Test
public void testAddRemoveIpAddressFromWafV2WithExclusionsEqualsAny() {
String appGatewayName = generateRandomResourceName("agwaf", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withNewResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withAutoScale(2, 5)
.withWebApplicationFirewall(
new ApplicationGatewayWebApplicationFirewallConfiguration()
.withEnabled(true)
.withFirewallMode(ApplicationGatewayFirewallMode.PREVENTION)
.withRuleSetType("OWASP")
.withRuleSetVersion("3.0")
// null operator + null selector == exclusion applies to any value.
.withExclusions(Collections.singletonList(
new ApplicationGatewayFirewallExclusion()
.withMatchVariable("RequestHeaderNames")
.withSelectorMatchOperator(null)
.withSelector(null)
))
)
.create();
Assertions.assertEquals("RequestHeaderNames", appGateway.webApplicationFirewallConfiguration().exclusions().iterator().next().matchVariable());
Assertions.assertNull(appGateway.webApplicationFirewallConfiguration().exclusions().iterator().next().selectorMatchOperator());
// Removing every backend address must round-trip cleanly with the exclusion in place.
Map<String, ApplicationGatewayBackend> backends = appGateway.backends();
backends.forEach((name, backend) ->
backend.addresses().forEach(addr ->
appGateway.update()
.updateBackend(name)
.withoutIPAddress(addr.ipAddress())
.parent()
.apply()));
}
/**
 * Verifies WAF policy association for an Application Gateway:
 * <ul>
 *   <li>an existing policy can be attached at creation and is discoverable from both sides;</li>
 *   <li>{@code withNewWebApplicationFirewallPolicy} replaces it with a freshly created policy;</li>
 *   <li>mixing a new WAF policy with the legacy firewall configuration is rejected with
 *       {@link IllegalStateException} and must not leak a partially created policy.</li>
 * </ul>
 */
@Test
public void canAssociateWafPolicy() {
    String appGatewayName = generateRandomResourceName("agwaf", 15);
    String wafPolicyName = generateRandomResourceName("waf", 15);
    PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
    WebApplicationFirewallPolicy wafPolicy =
        networkManager
            .webApplicationFirewallPolicies()
            .define(wafPolicyName)
            .withRegion(REGION)
            .withExistingResourceGroup(rgName)
            .withManagedRuleSet(KnownWebApplicationGatewayManagedRuleSet.OWASP_3_2)
            .create();
    ApplicationGateway appGateway =
        networkManager
            .applicationGateways()
            .define(appGatewayName)
            .withRegion(REGION)
            .withExistingResourceGroup(rgName)
            .defineRequestRoutingRule("rule1")
                .fromPublicFrontend()
                .fromFrontendHttpPort(80)
                .toBackendHttpPort(8080)
                .toBackendIPAddress("11.1.1.1")
                .toBackendIPAddress("11.1.1.2")
                .attach()
            .withExistingPublicIpAddress(pip)
            .withTier(ApplicationGatewayTier.WAF_V2)
            .withSize(ApplicationGatewaySkuName.WAF_V2)
            .withExistingWebApplicationFirewallPolicy(wafPolicy)
            .create();
    // Policy association replaces the legacy inline firewall configuration.
    Assertions.assertNotNull(appGateway.getWebApplicationFirewallPolicy());
    Assertions.assertNull(appGateway.webApplicationFirewallConfiguration());
    wafPolicy.refresh();
    // Association is visible from both the policy and the gateway.
    Assertions.assertEquals(appGateway.id(), wafPolicy.getAssociatedApplicationGateways().iterator().next().id());
    Assertions.assertEquals(wafPolicy.id(), appGateway.getWebApplicationFirewallPolicy().id());
    appGateway.update()
        .withNewWebApplicationFirewallPolicy(WebApplicationFirewallMode.PREVENTION)
        .apply();
    WebApplicationFirewallPolicy newPolicy = appGateway.getWebApplicationFirewallPolicy();
    Assertions.assertNotNull(newPolicy);
    Assertions.assertTrue(newPolicy.isEnabled());
    Assertions.assertEquals(WebApplicationFirewallMode.PREVENTION, newPolicy.mode());
    // The implicitly created policy must be a brand-new resource, not the original one.
    Assertions.assertNotEquals(newPolicy.id(), wafPolicy.id());
    Assertions.assertEquals(appGateway.id(), newPolicy.getAssociatedApplicationGateways().iterator().next().id());
    Assertions.assertEquals(newPolicy.id(), appGateway.getWebApplicationFirewallPolicy().id());
    String invalidPolicyName = "invalid";
    Assertions.assertThrows(IllegalStateException.class, () -> {
        networkManager.applicationGateways()
            // Use the named constant instead of repeating the "invalid" literal.
            .define(invalidPolicyName)
            .withRegion(REGION)
            .withExistingResourceGroup(rgName)
            .defineRequestRoutingRule("rule1")
                .fromPublicFrontend()
                .fromFrontendHttpPort(80)
                .toBackendHttpPort(8080)
                .toBackendIPAddress("11.1.1.1")
                .toBackendIPAddress("11.1.1.2")
                .attach()
            .withNewPublicIpAddress()
            .withTier(ApplicationGatewayTier.WAF_V2)
            .withSize(ApplicationGatewaySkuName.WAF_V2)
            .withNewWebApplicationFirewallPolicy(
                networkManager
                    .webApplicationFirewallPolicies()
                    .define(invalidPolicyName)
                    .withRegion(REGION)
                    .withExistingResourceGroup(rgName)
                    .withManagedRuleSet(KnownWebApplicationGatewayManagedRuleSet.OWASP_3_2))
            // Combining a WAF policy with the legacy configuration is invalid.
            .withWebApplicationFirewall(true, ApplicationGatewayFirewallMode.PREVENTION)
            .create();
    });
    // The failed creation must not leave the invalid policy behind.
    Assertions.assertTrue(
        networkManager
            .webApplicationFirewallPolicies()
            .listByResourceGroup(rgName)
            .stream()
            .noneMatch(policy -> policy.name().equals(invalidPolicyName)));
}
// Verifies SSL policy configuration: a predefined policy at creation, switching to a
// CustomV2 policy via update, and rejection of an invalid predefined policy that also
// specifies a minimum protocol version.
@Test
public void canSetSslPolicy() {
String appGatewayName = generateRandomResourceName("agw", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withExistingResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.withExistingPublicIpAddress(pip)
.withTier(ApplicationGatewayTier.WAF_V2)
.withSize(ApplicationGatewaySkuName.WAF_V2)
.withPredefinedSslPolicy(ApplicationGatewaySslPolicyName.APP_GW_SSL_POLICY20150501)
.create();
ApplicationGatewaySslPolicy sslPolicy = appGateway.sslPolicy();
Assertions.assertNotNull(sslPolicy);
Assertions.assertEquals(ApplicationGatewaySslPolicyType.PREDEFINED, sslPolicy.policyType());
Assertions.assertEquals(ApplicationGatewaySslPolicyName.APP_GW_SSL_POLICY20150501, sslPolicy.policyName());
// Switch to a CustomV2 policy: min protocol + explicit cipher suite list.
appGateway.update()
.withCustomV2SslPolicy(ApplicationGatewaySslProtocol.TLSV1_2, Collections.singletonList(ApplicationGatewaySslCipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256))
.apply();
sslPolicy = appGateway.sslPolicy();
Assertions.assertNotNull(sslPolicy);
Assertions.assertEquals(ApplicationGatewaySslPolicyType.CUSTOM_V2, sslPolicy.policyType());
// Custom policies carry no predefined policy name.
Assertions.assertNull(sslPolicy.policyName());
Assertions.assertEquals(ApplicationGatewaySslProtocol.TLSV1_2, sslPolicy.minProtocolVersion());
Assertions.assertTrue(sslPolicy.cipherSuites().contains(ApplicationGatewaySslCipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256));
// Predefined policy + min protocol version is contradictory; service rejects it.
Assertions.assertThrows(ManagementException.class, () -> {
appGateway.update()
.withSslPolicy(new ApplicationGatewaySslPolicy()
.withPolicyType(ApplicationGatewaySslPolicyType.PREDEFINED)
.withPolicyName(ApplicationGatewaySslPolicyName.APP_GW_SSL_POLICY20150501)
.withMinProtocolVersion(ApplicationGatewaySslProtocol.TLSV1_1))
.apply();
});
}
// Verifies the defaults when no SKU/tier is specified: the gateway comes up as BASIC
// and the routing rule still receives an auto-assigned priority.
@Test
public void canCreateApplicationGatewayWithDefaultSku() {
String appGatewayName = generateRandomResourceName("agw", 15);
PublicIpAddress pip = createResourceGroupAndPublicIpAddress();
ApplicationGateway appGateway =
networkManager
.applicationGateways()
.define(appGatewayName)
.withRegion(REGION)
.withNewResourceGroup(rgName)
.defineRequestRoutingRule("rule1")
.fromPublicFrontend()
.fromFrontendHttpPort(80)
.toBackendHttpPort(8080)
.toBackendIPAddress("11.1.1.1")
.attach()
.withExistingPublicIpAddress(pip)
.create();
Assertions.assertEquals(ApplicationGatewayTier.BASIC, appGateway.tier());
Assertions.assertNotNull(appGateway.requestRoutingRules().get("rule1").priority());
}
/**
 * Creates a Key Vault, grants the running service principal full secret/certificate
 * permissions plus secret read access for the given identity, provisions a default
 * self-signed certificate in it, and returns the certificate's backing secret id.
 *
 * <p>Note: this is a private helper, not a test — the stray {@code @Test} annotation
 * that used to sit on it was removed (JUnit 5 test methods must not be private, so the
 * annotation was at best dead and at worst a discovery error).</p>
 *
 * @param servicePrincipal client id of the principal running the test
 * @param identityPrincipal principal id of the managed identity that needs secret access
 * @return the Key Vault secret id backing the newly created certificate
 */
private String createKeyVaultCertificate(String servicePrincipal, String identityPrincipal) {
    String vaultName = generateRandomResourceName("vlt", 10);
    String secretName = generateRandomResourceName("srt", 10);
    Vault vault =
        keyVaultManager
            .vaults()
            .define(vaultName)
            .withRegion(REGION)
            .withExistingResourceGroup(rgName)
            .defineAccessPolicy()
                .forServicePrincipal(servicePrincipal)
                .allowSecretAllPermissions()
                .allowCertificateAllPermissions()
                .attach()
            .defineAccessPolicy()
                .forObjectId(identityPrincipal)
                .allowSecretAllPermissions()
                .attach()
            .withAccessFromAzureServices()
            .withDeploymentEnabled()
            .create();
    CertificateClient certificateClient = new CertificateClientBuilder()
        .vaultUrl(vault.vaultUri())
        .pipeline(vault.vaultHttpPipeline())
        .buildClient();
    // Blocks until the certificate operation completes.
    KeyVaultCertificateWithPolicy certificate = certificateClient.beginCreateCertificate(secretName, CertificatePolicy.getDefault()).getFinalResult();
    return certificate.getSecretId();
}
/**
 * Creates a Key Vault with access policies for the running service principal and the
 * given identity, then stores the first line of the bundled {@code test.certificate}
 * resource as a secret and returns it.
 *
 * @param servicePrincipal client id of the principal running the test
 * @param identityPrincipal principal id of the managed identity that needs secret access
 * @return the created {@link Secret}
 * @throws Exception if the certificate resource cannot be read or vault creation fails
 */
private Secret createKeyVaultSecret(String servicePrincipal, String identityPrincipal) throws Exception {
    String vaultName = generateRandomResourceName("vlt", 10);
    String secretName = generateRandomResourceName("srt", 10);
    String secretValue;
    // try-with-resources: the previous version leaked this reader on every call.
    // NOTE(review): getResource("test.certificate") would NPE if the resource is missing —
    // acceptable in a test helper, but verify the resource ships with the test jar.
    try (BufferedReader buff = new BufferedReader(new FileReader(new File(getClass().getClassLoader()
        .getResource("test.certificate").getFile())))) {
        secretValue = buff.readLine();
    }
    Vault vault =
        keyVaultManager
            .vaults()
            .define(vaultName)
            .withRegion(REGION)
            .withExistingResourceGroup(rgName)
            .defineAccessPolicy()
                .forServicePrincipal(servicePrincipal)
                .allowSecretAllPermissions()
                .attach()
            .defineAccessPolicy()
                .forObjectId(identityPrincipal)
                .allowSecretAllPermissions()
                .attach()
            .withAccessFromAzureServices()
            .withDeploymentEnabled()
            .create();
    return vault.secrets().define(secretName).withValue(secretValue).create();
}
/**
 * Converts an MSI {@link Identity} into the network model's {@link ManagedServiceIdentity}
 * (type USER_ASSIGNED) by serializing {principalId, clientId} to JSON and deserializing it
 * into the user-assigned-identities value type, keyed by the identity's resource id.
 *
 * @param identity the user-assigned identity to wrap
 * @return a USER_ASSIGNED {@link ManagedServiceIdentity} referencing {@code identity}
 * @throws Exception if the JSON round-trip fails
 */
private static ManagedServiceIdentity createManagedServiceIdentityFromIdentity(Identity identity) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Build the payload directly as an ObjectNode — no JsonNode + cast dance needed.
    ObjectNode userAssignedIdentitiesValueObject = mapper.createObjectNode();
    userAssignedIdentitiesValueObject.put("principalId", identity.principalId());
    userAssignedIdentitiesValueObject.put("clientId", identity.clientId());
    // Round-trip through JSON because the target type exposes no public setters.
    ManagedServiceIdentityUserAssignedIdentities userAssignedIdentitiesValue =
        new JacksonAdapter()
            .deserialize(
                mapper.writerWithDefaultPrettyPrinter().writeValueAsString(userAssignedIdentitiesValueObject),
                ManagedServiceIdentityUserAssignedIdentities.class,
                SerializerEncoding.JSON);
    Map<String, ManagedServiceIdentityUserAssignedIdentities> userAssignedIdentities = new HashMap<>();
    userAssignedIdentities.put(identity.id(), userAssignedIdentitiesValue);
    ManagedServiceIdentity serviceIdentity = new ManagedServiceIdentity();
    serviceIdentity.withType(ResourceIdentityType.USER_ASSIGNED);
    serviceIdentity.withUserAssignedIdentities(userAssignedIdentities);
    return serviceIdentity;
}
/**
 * Provisions a standard-SKU, static public IP address in a freshly created resource
 * group ({@code rgName}) for use by the Application Gateway tests.
 *
 * @return the newly created {@link PublicIpAddress}
 */
private PublicIpAddress createResourceGroupAndPublicIpAddress() {
    final String publicIpName = generateRandomResourceName("pip", 15);
    return networkManager
        .publicIpAddresses()
        .define(publicIpName)
        .withRegion(REGION)
        .withNewResourceGroup(rgName)
        .withSku(PublicIPSkuType.STANDARD)
        .withStaticIP()
        .create();
}
} |
So, session token capturing in `SessionContainer` is falling back to client-side settings. Wouldn't this lead to session token being captured in case of account-level consistencies weaker than Session consistency? I assume customers doing account-level changes (changing consistency from Eventual to Session) is less prevalent than client-level changes (changing consistency from Strong/Bounded Staleness to Session) which would warrant a populated session container. | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
// Optional hook (e.g. for tests/client sharing) to wrap or replace the HTTP client.
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
// Gateway proxy must exist before the caches below, which route through it.
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
// Reads the database account; throws if the endpoint is unreachable.
this.initializeGatewayConfigurationReader();
// Seed the collection cache from a warm snapshot when one was provided.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
// Wire the freshly built caches/config into the gateway proxy.
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// No client-level consistency configured -> fall back to the account's default
// (now known, since the database account was fetched above), so session capturing
// is enabled exactly when the effective consistency is SESSION or explicitly overridden.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
// Tear down anything partially initialized, then rethrow to the caller.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
// Optional hook (e.g. for tests/client sharing) to wrap or replace the HTTP client.
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
// Gateway proxy must exist before the caches below, which route through it.
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
// Reads the database account; throws if the endpoint is unreachable.
this.initializeGatewayConfigurationReader();
// Seed the collection cache from a warm snapshot when one was provided.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
// Wire the freshly built caches/config into the gateway proxy.
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// No client-level consistency configured -> fall back to the account's default
// (now known, since the database account was fetched above), so session capturing
// is enabled exactly when the effective consistency is SESSION or explicitly overridden.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
// Tear down anything partially initialized, then rethrow to the caller.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
// Convenience overload without a TokenCredential: delegates to the permission-feed
// constructor (passing null for tokenCredential) and then installs the custom
// authorization token resolver.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Resolver is set after delegation; the delegated ctor does not touch it.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
// Overload accepting a TokenCredential (AAD auth): delegates to the permission-feed
// constructor and then installs the custom authorization token resolver.
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Resolver is set after delegation; the delegated ctor does not touch it.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor handling resource-token (permission feed) authorization.
 * Delegates the core initialization to the main constructor, then builds
 * {@code resourceTokensMap}: resource id / full name -> list of (partition key, token)
 * pairs extracted from the permission feed.
 *
 * @throws IllegalArgumentException if a permission has an empty or unparsable resource
 *         link, or if the feed yields no usable tokens
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the previous get / null-check / put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first genuine resource token for fallback authorization.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
// Main constructor: sets up auth, connection policy, diagnostics config, session
// container, endpoint manager and retry policy. Network-dependent initialization
// (caches, store model, telemetry) is deferred to init().
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Bookkeeping for diagnostics: global active-client count and per-endpoint map.
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Auth precedence: explicit key credential > resource token > master key > AAD token.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): the scope literal below appears truncated by extraction
// (unterminated string) — verify against the upstream source.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
// Default to direct-mode connectivity when no policy is supplied.
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session capturing is disabled unless the client-level consistency is SESSION or an
// explicit override was requested; init() re-evaluates this against the account default.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
// Register for CPU/memory pressure callbacks.
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Clean up partially-acquired resources (counters, monitor registration) and rethrow.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * Returns the diagnostics client configuration assembled during client construction.
 */
@Override
public DiagnosticsClientConfig getConfig() {
    return diagnosticsClientConfig;
}
/**
 * Creates a new {@link CosmosDiagnostics} instance for this client, applying the
 * sampling rate configured in the client telemetry config.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Wires the freshly created caches and the gateway service configuration reader into
 * the gateway store model. Invoked once those collaborators exist, after construction.
 */
private void updateGatewayProxy() {
    this.gatewayProxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    this.gatewayProxy.setCollectionCache(this.collectionCache);
    this.gatewayProxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    this.gatewayProxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Serializes this client's collection cache into the given metadata caches snapshot,
 * so another client instance can be warmed up from it.
 *
 * @param state target snapshot that receives the serialized collection cache
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up the direct (TCP) connectivity stack: the global address resolver, the store
 * client factory, and finally the server store model.
 * Order matters: the store client factory consumes the address resolver created here,
 * and createStoreModel consumes the factory.
 */
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        // NOTE(review): this argument is passed as null here — presumably an optional
        // collaborator not needed for this code path; confirm against GlobalAddressResolver's ctor.
        null,
        this.connectionPolicy,
        this.apiType);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager);
    this.createStoreModel(true);
}
/**
 * Exposes this client as a {@link DatabaseAccountManagerInternal} by delegating the
 * endpoint, account-fetch and connection-policy accessors back to the outer instance.
 * A fresh adapter object is created on every call.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model; package-private so tests can override it.
 * Simply forwards all collaborators to the {@link RxGatewayStoreModel} constructor.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy settings.
 * With connection sharing enabled a process-wide shared instance is returned;
 * otherwise a client-private instance is created and its configuration is
 * recorded in the diagnostics client config.
 */
private HttpClient httpClient() {
    HttpClientConfig clientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(clientConfig, diagnosticsClientConfig);
    }
    diagnosticsClientConfig.withGatewayHttpClientConfig(clientConfig.toDiagnosticsString());
    return HttpClient.createFixed(clientConfig);
}
/**
 * Creates the direct-mode store client and wraps it in a {@link ServerStoreModel}.
 *
 * @param subscribeRntbdStatus NOTE(review): not referenced in this body — presumably
 *        consumed by an overload or removed logic; confirm before relying on it.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations,
        this.sessionRetryOptions);
    this.storeModel = new ServerStoreModel(storeClient);
}
/**
 * Returns the service endpoint URI this client was created against.
 */
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}
/**
 * Returns the effective connection policy (never null after construction).
 */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/**
 * Whether write operations return the full resource payload by default.
 */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}
/**
 * Returns the client-level consistency level, or null if not explicitly set.
 */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}
/**
 * Returns the client telemetry collector associated with this client.
 */
@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}
/**
 * Returns the correlation id used to tag diagnostics/telemetry for this client instance.
 */
@Override
public String getClientCorrelationId() {
    return this.clientCorrelationId;
}
/**
 * Returns the machine id recorded in the diagnostics client config, or {@code null}
 * when no diagnostics config is available.
 */
@Override
public String getMachineId() {
    return this.diagnosticsClientConfig == null
        ? null
        : this.diagnosticsClientConfig.getMachineId();
}
/**
 * Returns the full user-agent string (base agent plus any configured suffix).
 */
@Override
public String getUserAgent() {
    return this.userAgentContainer.getUserAgent();
}
/**
 * Creates a database. Wraps the internal implementation in the standard
 * retry policy so transient failures are retried transparently.
 *
 * @param database database definition to create
 * @param options  request options; may be null
 * @return a Mono emitting the create response or an error
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the actual database-create request.
 * Serialization timing is captured and attached to the request diagnostics.
 *
 * @param database            database definition; must be non-null with a valid id
 * @param options             request options; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return a Mono emitting the create response; argument/serialization failures are
 *         surfaced as an error Mono rather than thrown synchronously
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Time the payload serialization so it shows up in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Convert synchronous failures into the reactive error channel.
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a database by link, wrapped in the standard retry policy.
 *
 * @param databaseLink link of the database to delete
 * @param options      request options; may be null
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the database-delete request.
 *
 * @param databaseLink        non-empty link of the database to delete
 * @param options             request options; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return a Mono emitting the delete response; validation failures surface as an error Mono
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Convert synchronous failures into the reactive error channel.
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database by link, wrapped in the standard retry policy.
 *
 * @param databaseLink link of the database to read
 * @param options      request options; may be null
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the database-read request.
 *
 * @param databaseLink        non-empty link of the database to read
 * @param options             request options; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return a Mono emitting the read response; validation failures surface as an error Mono
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Convert synchronous failures into the reactive error channel.
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of all databases on the account.
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus a resource type to the link used for query/feed
 * requests against that type. Root-scoped types (Database, Offer) ignore the parent
 * link; every other supported type appends its type-specific path segment.
 *
 * @throws IllegalArgumentException for resource types that cannot be queried this way
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    // Root-scoped feeds do not depend on the parent link at all.
    if (resourceTypeEnum == ResourceType.Database) {
        return Paths.DATABASES_ROOT;
    }
    if (resourceTypeEnum == ResourceType.Offer) {
        return Paths.OFFERS_ROOT;
    }
    String segment;
    if (resourceTypeEnum == ResourceType.DocumentCollection) {
        segment = Paths.COLLECTIONS_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.Document) {
        segment = Paths.DOCUMENTS_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.User) {
        segment = Paths.USERS_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.ClientEncryptionKey) {
        segment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.Permission) {
        segment = Paths.PERMISSIONS_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.Attachment) {
        segment = Paths.ATTACHMENTS_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.StoredProcedure) {
        segment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.Trigger) {
        segment = Paths.TRIGGERS_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.UserDefinedFunction) {
        segment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
    } else if (resourceTypeEnum == ResourceType.Conflict) {
        segment = Paths.CONFLICTS_PATH_SEGMENT;
    } else {
        throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, segment);
}
/**
 * Extracts the operation context / listener tuple from query request options.
 *
 * @param options query request options; may be null
 * @return the tuple, or null when no options were supplied
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
/**
 * Extracts the operation context / listener tuple from point-operation request options.
 *
 * @param options request options; may be null
 * @return the tuple, or null when no options were supplied
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Convenience overload that uses this client itself as the diagnostics factory.
 * See the 6-arg overload for the full contract.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
/**
 * Core query pipeline setup: resolves the query link, picks/creates a correlation
 * activity id, wraps diagnostics in a scoped factory tied to the operation state, and
 * executes the query under an invalid-partition retry policy.
 * Diagnostics are merged back into the state snapshot on success, error, and cancellation
 * so no path loses its recorded diagnostics.
 *
 * @param innerDiagnosticsFactory factory the scoped diagnostics factory delegates to
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
    // Caller-provided correlation id wins; otherwise generate one for this query.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();
    // Shared flag so the timeout path can signal cancellation into the execution context.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);
    return
        ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy
        ).flatMap(result -> {
            // Merge diagnostics on every emitted page.
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            // Also merge on failure so the error carries complete diagnostics.
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
/**
 * Creates the document-query execution context and maps its pages into feed responses.
 * For pipelined queries, the query info is attached to each page and the query-plan
 * diagnostics are attached to the first page only. When an end-to-end latency policy
 * is enabled, the resulting flux is wrapped with the corresponding timeout handling.
 *
 * @param isQueryCancelledOnTimeout set by the timeout wrapper so the execution context
 *                                  can observe cancellation
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // Query-plan diagnostics belong on the first page only.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
        }
        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Merges the client-side request statistics of every diagnostics object tracked for
 * cancelled requests into a single aggregate and attaches it to the given exception,
 * so a timeout surfaces the diagnostics of all requests that were in flight.
 *
 * @param requestOptions options holding the cancelled-request diagnostics tracker
 * @param exception      the (timeout) exception to enrich with the merged diagnostics
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {
    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);
    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregatedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);
                    // BUGFIX: previously read the statistics from 'first' again, which
                    // merged 'first' into itself and silently dropped 'toBeMerged'.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);
                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get();
        BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
    }
}
/**
 * Wraps a query page flux with the end-to-end operation timeout. On timeout the
 * reactor {@link TimeoutException} is translated into a Cosmos cancellation exception
 * carrying the merged diagnostics of all cancelled in-flight requests, and the shared
 * cancellation flag is raised so the execution context can stop work.
 * Negative timeouts use a dedicated exception type but otherwise follow the same flow.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        // A negative timeout fires immediately; surface a distinct exception for it.
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
                    return cancellationException;
                }
                return throwable;
            });
    }
    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                CosmosException exception = new OperationCancelledException();
                exception.setStackTrace(throwable.getStackTrace());
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnostics(requestOptions, exception);
                return exception;
            }
            return throwable;
        });
}
/**
 * Queries databases with a raw query string; delegates to the SqlQuerySpec overload.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return queryDatabases(new SqlQuerySpec(query), state);
}
/**
 * Queries databases at the account root using the shared query pipeline.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a collection under the given database, wrapped in the standard retry policy.
 *
 * @param databaseLink parent database link
 * @param collection   collection definition to create
 * @param options      request options; may be null
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the collection-create request, capturing serialization diagnostics
 * and recording the returned session token on success.
 *
 * @param databaseLink        non-empty parent database link
 * @param collection          non-null collection definition with a valid id
 * @param options             request options; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return a Mono emitting the create response; validation failures surface as an error Mono
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the payload serialization so it shows up in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // Record the session token for the newly created collection.
        // NOTE(review): unlike replaceCollectionInternal, getResource() is used without a
        // null check here — presumably create responses always carry the resource; confirm.
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        // Convert synchronous failures into the reactive error channel.
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a collection definition, wrapped in the standard retry policy.
 *
 * @param collection updated collection definition (self-link identifies the target)
 * @param options    request options; may be null
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the collection-replace request, capturing serialization diagnostics
 * and recording the returned session token when the response carries a resource.
 *
 * @param collection          non-null updated collection definition
 * @param options             request options; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return a Mono emitting the replace response; validation failures surface as an error Mono
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the payload serialization so it shows up in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Replace responses may omit the resource body; only record a session token when present.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        // Convert synchronous failures into the reactive error channel.
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a collection by link, wrapped in the standard retry policy.
 *
 * @param collectionLink link of the collection to delete
 * @param options        request options; may be null
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the collection-delete request.
 *
 * @param collectionLink      non-empty link of the collection to delete
 * @param options             request options; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return a Mono emitting the delete response; validation failures surface as an error Mono
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Convert synchronous failures into the reactive error channel.
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Populates DELETE headers and dispatches the request to the appropriate store proxy.
 * Updates the retry-context end time when this send is a retry attempt.
 * NOTE(review): documentClientRetryPolicy is dereferenced without a null check —
 * presumably all callers pass a non-null policy; confirm before adding new call sites.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
/**
 * Populates POST headers and dispatches a delete-all-items-by-partition-key request.
 * Updates the retry-context end time when this send is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
/**
 * Populates GET headers and dispatches a point-read request to the store proxy.
 * Updates the retry-context end time when this send is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
/**
 * Populates GET headers and dispatches a feed-read request to the store proxy.
 */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}
/**
 * Populates POST headers, dispatches a query request, and captures the session token
 * from the response before handing it back to the caller.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                .map(response -> {
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            ));
}
/**
 * Reads a collection by link, wrapped in the standard retry policy.
 *
 * @param collectionLink link of the collection to read
 * @param options        request options; may be null
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the collection-read request.
 *
 * @param collectionLink      non-empty link of the collection to read
 * @param options             request options; may be null
 * @param retryPolicyInstance retry policy to notify before sending; may be null
 * @return a Mono emitting the read response; validation failures surface as an error Mono
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Convert synchronous failures into the reactive error channel.
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of all collections under the given database.
 *
 * @param databaseLink non-empty parent database link
 * @throws IllegalArgumentException if databaseLink is empty (thrown synchronously)
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
/**
 * Queries collections under a database with a raw query string.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               QueryFeedOperationState state) {
    return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections under a database with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a single JSON array literal.
 * {@link JsonSerializable} values use their own serializer; everything else goes
 * through the shared Jackson mapper.
 *
 * @param objectArray parameters to serialize
 * @return JSON array string, e.g. {@code ["a",1]}
 * @throws IllegalArgumentException if a value cannot be serialized to JSON
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            json.append(",");
        }
        Object object = objectArray.get(i);
        if (object instanceof JsonSerializable) {
            json.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object));
        } else {
            try {
                json.append(mapper.writeValueAsString(object));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append("]").toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the HTTP headers for an operation by layering client-wide defaults first and the
// per-request RequestOptions on top (per-request values overwrite client defaults).
// Returns a fresh mutable map; never null.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
// Client-level defaults, applied regardless of per-request options.
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
if (options == null) {
// No per-request options: only the client-wide contentResponseOnWriteEnabled flag applies.
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
// Custom headers go in first so the typed options below can overwrite them.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request flag overrides the client-wide default when explicitly set.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if (options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
// Pre/post trigger ids are sent as comma-separated lists.
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Throughput headers: an explicit non-negative offer throughput wins over a named offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties are only consulted when no explicit offer throughput was given.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
// Mixing fixed (manual) throughput with autoscale settings is invalid.
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
// Dedicated gateway (integrated cache) tuning headers.
if (options.getDedicatedGatewayRequestOptions() != null) {
if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
}
}
return headers;
}
// Factory for per-request retry policies that can reset a stale session token on retry.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection for the request and stamps the partition-key header on it.
// The returned Mono re-emits the same request instance once the header has been populated.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map(resolvedCollectionHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollectionHolder.v);
        return request;
    });
}
// Overload used when the caller has already started the collection lookup; maps the resolved
// collection into partition-key header population and re-emits the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        DocumentCollection resolvedCollection = holder.v;
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolvedCollection);
        return request;
    });
}
// Computes the effective partition key for the operation and sets it on the request, both as
// the internal PK object and as the serialized PARTITION_KEY header.
// Precedence: explicit PartitionKey.NONE in options > explicit PK in options > empty PK for a
// non-partitioned collection > PK extracted from the document payload. If none of these
// apply, the operation cannot be routed and is rejected.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collection is not partitioned.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// Read the PK path from the serialized payload; rewind because the buffer may already
// have been consumed by serialization.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Time the PK extraction so it shows up in serialization diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal =  PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the RxDocumentServiceRequest for a document Create/Upsert: serializes the payload
// (recording serialization diagnostics), builds headers, wires the retry policy, and resolves
// the partition key from options or the payload before emitting the request.
// NOTE(review): disableAutomaticIdGeneration is not consumed in this body - presumably
// enforced by the serializer or callers; confirm.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Opt-in retry of non-idempotent writes only applies to write operations.
if (operationType.isWriteOperation() &&  options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so the partition key can be computed from options or the payload.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Builds the RxDocumentServiceRequest for a transactional batch: wraps the pre-serialized
// batch body (recording serialization diagnostics), builds headers, wires the retry policy,
// and resolves the target collection so batch routing headers (PK / PK-range) can be stamped.
// Fixes a redundancy in the previous revision which applied setExcludeRegions twice.
// NOTE(review): disableAutomaticIdGeneration is not consumed in this body - confirm callers.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // The batch body is already serialized; just wrap it and time the wrapping for diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    // Propagate per-request excluded regions (applied exactly once).
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
// Stamps batch routing and control headers on the request: either a partition-key header
// (single-PK batch) or a PK-range identity (PK-range batch), plus atomicity /
// continue-on-error flags and the operation count. Returns the same request instance.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// PartitionKey.NONE maps to the collection-specific "none" partition key.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers.
 * Stamps the x-date, authorization (key/resource-token based; AAD is attached later via
 * populateAuthorizationHeader), api-type, SDK-capabilities, content-type and accept headers,
 * and - for feed-range-filtered reads - the feed-range routing headers.
 * @param request request to populate headers to
 * @param httpMethod http method, used for the authorization signature and content-type defaults
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key/token-based auth is computed synchronously here.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Default content-type: JSON for POST/PUT, JSON-patch for PATCH; never overwrite an
// explicitly set content-type.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range-filtered reads need routing headers resolved asynchronously before auth.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
// Advertises the SDK's supported capabilities to the service, unless the caller already
// set the header explicitly.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
// True when the request must carry feed-range routing headers: a Document or Conflict
// ReadFeed/Query/SqlQuery operation that actually has a feed range attached.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean feedRangeAwareResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!feedRangeAwareResource) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean feedRangeAwareOperation =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return feedRangeAwareOperation && request.getFeedRange() != null;
}
// Attaches the AAD bearer token to the request when AAD auth is configured; for all other
// auth modes the request passes through unchanged (key/token auth is stamped earlier).
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
// HttpHeaders variant of populateAuthorizationHeader: sets the AAD bearer token when AAD auth
// is configured, otherwise returns the headers unchanged.
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
// Exposes which authorization mechanism this client was configured with.
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
return this.authorizationTokenType;
}
// Produces the authorization token for a request. Precedence of configured auth sources:
// custom token resolver > key credential (signed) > single resource token > resource-token
// map (with a special-case first token for DatabaseAccount reads).
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Properties are exposed read-only to the user-supplied resolver.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token configured for the whole client.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
// Maps an internal ResourceType to its public CosmosResourceType, falling back to SYSTEM for
// types that have no public counterpart.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token returned by the service so subsequent requests can maintain
// session consistency.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// POSTs a create request through the store proxy after asynchronous header population.
// Updates the retry-context end time when this attempt is a retry so diagnostics capture
// retry latency.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
DocumentClientRetryPolicy documentClientRetryPolicy,
OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
// POSTs an upsert (create-or-replace) through the store proxy: same flow as create() but adds
// the IS_UPSERT header and captures the returned session token for session consistency.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
DocumentClientRetryPolicy documentClientRetryPolicy,
OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
Map<String, String> headers = requestPopulated.getHeaders();
// populateHeadersAsync always installs headers, so this cannot be null here.
assert (headers != null);
headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
.map(response -> {
this.captureSessionToken(requestPopulated, response);
return response;
}
);
});
}
// PUTs a replace request through the store proxy after asynchronous header population.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            // Close out the retry-context timing window when this attempt is a retry.
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Sends a PATCH request through the store proxy after asynchronous header population;
// mirrors replace() but with the PATCH verb.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                // Record when this retry attempt finished header population for diagnostics.
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Public create-document entry point: routes through the availability strategy so
// cross-region hedging/retries apply, honoring the non-idempotent write retry opt-in.
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (effectiveOptions, e2eConfig, clientContextOverride) -> createDocumentCore(
            collectionLink,
            document,
            effectiveOptions,
            disableAutomaticIdGeneration,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
// Wires the retry pipeline for createDocument: adds a partition-key-mismatch retry when no
// explicit PK was supplied (the cached collection metadata may be stale), then executes the
// create with inline-if-possible semantics.
private Mono<ResourceResponse<Document>> createDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() ->
createDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
// Executes one attempt of a document create: builds the request (serialization + headers +
// PK resolution), applies the end-to-end timeout policy, and maps the transport response to
// a typed ResourceResponse. Synchronous failures surface as an error Mono.
private Mono<ResourceResponse<Document>> createDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy requestRetryPolicy,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Applies the end-to-end operation timeout (when configured and enabled) to the response
// Mono: a negative timeout fails fast, and a reactor timeout is mapped to an
// OperationCancelledException carrying the request diagnostics. Without a policy the
// response Mono passes through untouched.
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
RxDocumentServiceRequest request,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
Mono<T> rxDocumentServiceResponseMono) {
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
}
// Record the policy on the request so diagnostics/cancellation handling can see it.
request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
return rxDocumentServiceResponseMono
.timeout(endToEndTimeout)
.onErrorMap(throwable -> getCancellationException(request, throwable));
}
return rxDocumentServiceResponseMono;
}
// Maps a reactor TimeoutException into an OperationCancelledException preserving the original
// stack trace and attaching the request diagnostics; any other throwable is returned
// unchanged.
// NOTE(review): when requestContext is null the TimeoutException is propagated as-is instead
// of the OperationCancelledException - confirm this is intentional.
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (unwrappedException instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
if (request.requestContext != null) {
request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
}
return throwable;
}
// Builds the OperationCancelledException used when a negative end-to-end timeout was
// configured, tagging it with the NEGATIVE_TIMEOUT_PROVIDED sub-status and, when available,
// the request diagnostics.
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
checkArgument(
negativeTimeout.isNegative(),
"This exception should only be used for negative timeouts");
String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
CosmosException exception = new OperationCancelledException(message, null);
BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
if (request != null && request.requestContext != null) {
request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
return exception;
}
// Public upsert-document entry point: routes through the availability strategy so
// cross-region hedging/retries apply, honoring the non-idempotent write retry opt-in.
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (effectiveOptions, e2eConfig, clientContextOverride) -> upsertDocumentCore(
            collectionLink, document, effectiveOptions, disableAutomaticIdGeneration, e2eConfig, clientContextOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
// Wires the retry pipeline for upsertDocument: adds a partition-key-mismatch retry when no
// explicit PK was supplied, then executes the upsert with inline-if-possible semantics.
private Mono<ResourceResponse<Document>> upsertDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> upsertDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
finalRetryPolicyInstance);
}
// Executes one attempt of a document upsert: builds the request, applies the end-to-end
// timeout policy, and maps the transport response to a typed ResourceResponse. Synchronous
// failures surface as an error Mono.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public replace-by-link entry point: routes through the availability strategy so
// cross-region hedging/retries apply, honoring the non-idempotent write retry opt-in.
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContextOverride) ->
            replaceDocumentCore(documentLink, document, effectiveOptions, e2eConfig, clientContextOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
// Wires the retry pipeline for replaceDocument(documentLink, ...): when no explicit PK is
// supplied, a PartitionKeyMismatchRetryPolicy keyed by the collection name derived from the
// document link handles stale collection-cache entries.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
String documentLink,
Object document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
documentLink,
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
// Validates arguments for replaceDocument(link, ...), converts the payload into the internal
// Document representation, and delegates to the typed overload. Argument failures surface as
// an error Mono rather than a synchronous throw.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Pass the throwable as the last argument so the stack trace is logged, consistent
        // with the other catch blocks in this class.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its own self-link, wrapping the point operation with the
 * configured availability strategy.
 *
 * @param document the document to replace (its self-link identifies the target).
 * @param options  request options; may be {@code null}.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContext) ->
            replaceDocumentCore(document, effectiveOptions, e2eConfig, clientContext),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core replace path for the Document-based overload: builds the per-operation retry
 * policy chain, then executes the replace, inlining the first attempt when possible.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // NOTE(review): the document self-link (not a collection link) is handed to
        // PartitionKeyMismatchRetryPolicy here, whereas the link-based overload passes
        // Utils.getCollectionName(documentLink) — confirm this asymmetry is intended.
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    // Effectively-final copy required for capture inside the lambda below.
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Validates the document and delegates to the link-based replace path using the
 * document's own self-link.
 *
 * @param document              the document to replace; must not be {@code null}.
 * @param options               request options; may be {@code null}.
 * @param retryPolicyInstance   retry policy already wired by the core path.
 * @param endToEndPolicyConfig  effective end-to-end latency policy.
 * @param clientContextOverride diagnostics context override.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Fixed: previous message incorrectly said "replacing a database" — this is a
        // document replace path.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Lowest-level replace path: serializes the document, builds the wire request, resolves
 * the target collection/partition key, and issues the Replace with an optional
 * end-to-end timeout.
 *
 * @param documentLink          self-link of the document being replaced.
 * @param document              typed replacement payload; must not be {@code null}.
 * @param options               request options; may be {@code null}.
 * @param retryPolicyInstance   retry policy used for this request; may be {@code null}.
 * @param endToEndPolicyConfig  effective end-to-end latency policy.
 * @param clientContextOverride diagnostics context override.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    // Serialization is timed so it can be surfaced in the request diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();

    if (options != null) {
        // A tracking id (used for write-retry dedup) is stamped into the payload itself.
        String trackingId = options.getTrackingId();
        if (trackingId != null && !trackingId.isEmpty()) {
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }

    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    // Retry policy must observe the request before it is sent (e.g. to capture region info).
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Resolve the collection (for partition-key definition), then attach PK info to the request.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);

    return requestObs.flatMap(req -> {
        Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class));
        // Apply the end-to-end operation timeout (if configured) around the whole call.
        return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
    });
}
/**
 * Resolves the effective end-to-end latency policy: the per-request configuration wins,
 * otherwise the client-level default applies.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    if (options != null && options.getCosmosEndToEndLatencyPolicyConfig() != null) {
        return options.getCosmosEndToEndLatencyPolicyConfig();
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies the supplied patch operations to a document, wrapping the point operation
 * with the configured availability strategy.
 *
 * @param documentLink          self-link of the document to patch.
 * @param cosmosPatchOperations the patch operations to apply.
 * @param options               request options; may be {@code null}.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (effectiveOptions, e2eConfig, clientContext) ->
            patchDocumentCore(documentLink, cosmosPatchOperations, effectiveOptions, e2eConfig, clientContext),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core patch path: creates the per-operation retry policy and executes the patch,
 * inlining the first attempt when possible.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(
            documentLink,
            cosmosPatchOperations,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Lowest-level patch path: serializes the patch operations, builds the wire request,
 * resolves the target collection, and issues the Patch with an optional end-to-end
 * timeout.
 *
 * @param documentLink          self-link of the document to patch; must be non-empty.
 * @param cosmosPatchOperations patch operations; must not be {@code null}.
 * @param options               request options; may be {@code null}.
 * @param retryPolicyInstance   retry policy used for this request; may be {@code null}.
 * @param endToEndPolicyConfig  effective end-to-end latency policy.
 * @param clientContextOverride diagnostics context override.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    // Serialization is timed so it can be surfaced in the request diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    // Retry policy must observe the request before it is sent.
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // Patch carries no item body, so no content/document is passed for PK extraction —
    // the partition key must come from the options or the collection definition.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);

    Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
        Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
        return getRxDocumentServiceResponseMonoWithE2ETimeout(
            request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
    });

    return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes the document identified by {@code documentLink}, wrapping the point operation
 * with the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    // No item snapshot is available for this overload, hence the null InternalObjectNode.
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContext) ->
            deleteDocumentCore(documentLink, null, effectiveOptions, e2eConfig, clientContext),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Deletes a document, supplying the item snapshot so the partition key can be derived
 * from it; wraps the point operation with the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContext) ->
            deleteDocumentCore(documentLink, internalObjectNode, effectiveOptions, e2eConfig, clientContext),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core delete path: creates the per-operation retry policy and executes the delete,
 * inlining the first attempt when possible.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Lowest-level delete path: builds the wire request, resolves the target collection,
 * and issues the Delete with an optional end-to-end timeout.
 *
 * @param documentLink          self-link of the document; must be non-empty.
 * @param internalObjectNode    optional item snapshot used for partition-key extraction;
 *                              may be {@code null}.
 * @param options               request options; may be {@code null}.
 * @param retryPolicyInstance   retry policy used for this request; may be {@code null}.
 * @param endToEndPolicyConfig  effective end-to-end latency policy.
 * @param clientContextOverride diagnostics context override.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        // Retry policy must observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

        // The item snapshot (when present) supplies the partition-key value.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);

        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(req -> {
                Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
                    .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
                return getRxDocumentServiceResponseMonoWithE2ETimeout(
                    request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
            });

        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes all documents within a logical partition of the given collection.
 * NOTE(review): the {@code partitionKey} parameter is not used directly here — the key
 * is resolved downstream from {@code options}; kept for interface compatibility.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Internal path for partition-wide delete: builds a PartitionKey-typed Delete request,
 * resolves the collection, and executes the operation.
 *
 * @param collectionLink      link of the collection; must be non-empty.
 * @param options             request options carrying the partition key; may be {@code null}.
 * @param retryPolicyInstance retry policy used for this request; may be {@code null}.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        // Note the ResourceType here is PartitionKey, not Document — this is a
        // partition-level operation addressed at the collection path.
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a document by its link using this client as its own diagnostics factory;
 * the private overload lets callers (e.g. readMany point reads) inject a scoped factory.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}
/**
 * Reads a document with an explicit diagnostics factory, wrapping the point operation
 * with the configured availability strategy. Reads are idempotent, so non-idempotent
 * write retries are always disabled for this path.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (effectiveOptions, e2eConfig, clientContext) ->
            readDocumentCore(documentLink, effectiveOptions, e2eConfig, clientContext),
        options,
        false,
        innerDiagnosticsFactory
    );
}
/**
 * Core read path: creates the per-operation retry policy and executes the read,
 * inlining the first attempt when possible.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
/**
 * Lowest-level read path: builds the wire request, resolves the target collection,
 * and issues the Read with an optional end-to-end timeout.
 *
 * @param documentLink          self-link of the document; must be non-empty.
 * @param options               request options; may be {@code null}.
 * @param retryPolicyInstance   retry policy used for this request; may be {@code null}.
 * @param endToEndPolicyConfig  effective end-to-end latency policy.
 * @param clientContextOverride diagnostics context override.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);

        // Fixed: guard against null options before reading exclude-regions, consistent
        // with the delete/replace/patch paths; previously this dereferenced `options`
        // unconditionally and could throw NullPointerException.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        // Retry policy must observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> {
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads every document in the collection by issuing an unfiltered query.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // A full scan is expressed as a SELECT * query over the collection.
    return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs, splitting the work
 * between point reads (ranges with exactly one item) and per-range IN/OR queries
 * (ranges with multiple items), then merges results and diagnostics into a single
 * {@link FeedResponse}.
 *
 * @param itemIdentityList identities (id + partition key) of the items to fetch.
 * @param collectionLink   link of the target collection.
 * @param state            carries query options and the diagnostics context.
 * @param klass            item deserialization target type.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {

    // Scoped factory collects diagnostics from all sub-operations; merged back into
    // the operation state when the context is captured.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );

    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );

    // Resolve the collection first — its partition key definition and resource id are
    // needed to route each identity to a partition key range.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);

    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Group identities by the partition key range owning their effective PK.
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // A hierarchical (multi-hash) PK must supply every path component.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                    .getComponents().size() != pkDefinition.getPaths().size()) {
                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }

                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);

                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);

                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });

                    // Ranges with >1 item get a SQL query; ranges with exactly 1 item are
                    // served as point reads below.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);

                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));

                    // Merge both result streams and aggregate pages, metrics, request
                    // charge and diagnostics into a single synthetic FeedResponse.
                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }

                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }

                            CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);

                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                // readMany reports a synthetic 200 regardless of individual
                                // 404s (those are folded into empty pages upstream).
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }

                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                })
                .onErrorMap(throwable -> {
                    // Attach the merged diagnostics context to failures so callers see
                    // the full picture of what was attempted.
                    if (throwable instanceof CosmosException) {
                        CosmosException cosmosException = (CosmosException)throwable;
                        CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                        if (diagnostics != null) {
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();

                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    cosmosException.getStatusCode(),
                                    cosmosException.getSubStatusCode(),
                                    0,
                                    cosmosException.getRequestCharge(),
                                    diagnostics,
                                    throwable
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        diagnostics,
                                        state.getDiagnosticsContextSnapshot());
                            }
                        }

                        return cosmosException;
                    }

                    return throwable;
                });
            }
        );
}
/**
 * Builds, per partition key range, the SQL query used to fetch the items routed to that
 * range. Ranges holding a single item are skipped — those are served via point reads.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();

    partitionRangeItemKeyMap.forEach((range, identities) -> {
        if (identities.size() <= 1) {
            return; // single item per range -> handled as a point read, no query needed
        }
        final SqlQuerySpec spec;
        if (pkSelector.equals("[\"id\"]")) {
            // PK path is /id, so matching on id alone is sufficient.
            spec = createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            spec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            spec = createReadManyQuerySpec(identities, pkSelector);
        }
        queriesByRange.put(range, spec);
    });

    return queriesByRange;
}
/**
 * Builds a parameterized {@code SELECT * FROM c WHERE c.id IN (...)} query for the case
 * where the partition key path is {@code /id}, so matching on id alone is sufficient.
 * NOTE(review): {@code partitionKeySelector} is unused in this builder; kept for
 * signature symmetry with the sibling query builders.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    final List<SqlParameter> parameters = new ArrayList<>();
    final List<String> paramNames = new ArrayList<>();

    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        String paramName = "@param" + i;
        parameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(i).getId()));
        paramNames.add(paramName);
    }

    String queryText =
        "SELECT * FROM c WHERE c.id IN ( " + String.join(", ", paramNames) + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds a parameterized disjunction query matching each item on both id and partition
 * key value: {@code SELECT * FROM c WHERE ( (c.id = @p AND c[pk] = @q) OR ... )}.
 * Parameters are numbered pairwise: even index = partition key, odd index = id.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final List<String> disjuncts = new ArrayList<>();

    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());

        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));

        disjuncts.add(
            "(c.id = " + idParamName
                + " AND  c" + partitionKeySelector + " = " + pkParamName + " )");
    }

    String queryText = "SELECT * FROM c WHERE ( " + String.join(" OR ", disjuncts) + " )";
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Builds a parameterized disjunction query for hierarchical (multi-hash) partition
 * keys: each item matches on id plus one predicate per partition key path component.
 * Parameters are numbered sequentially across items and components via
 * {@code paramCount}.
 *
 * NOTE(review): the PK value is obtained as a single String and split on '=' to recover
 * the per-path components — this assumes the serialized multi-hash value uses '=' as a
 * separator and that component values contain no '='; confirm against the PK encoding.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {

    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    // Global parameter counter shared across all items and PK components.
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);

        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkValueString = (String) pkValue;
        // Each entry pairs a PK path (e.g. "/tenant") with its parameter name.
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;

        // Split the composite PK value into its per-path components.
        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));

        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);

        // One equality predicate per PK path component; substring(1) drops the leading '/'.
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }

        queryStringBuilder.append(" )");
        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Concatenates the partition key paths into a bracketed property selector, e.g.
 * {@code /tenant} + {@code /user} -> {@code ["tenant"]["user"]}.
 * NOTE(review): the replace maps an embedded '"' to a single backslash (not an escaped
 * quote) — behavior preserved byte-for-byte here; confirm this escaping is intended.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder selector = new StringBuilder();
    for (String path : partitionKeyDefinition.getPaths()) {
        String part = StringUtils.substring(path, 1); // drop the leading '/'
        part = StringUtils.replace(part, "\"", "\\");
        selector.append("[\"").append(part).append("\"]");
    }
    return selector.toString();
}
/**
 * Executes the per-range readMany queries: builds a read-many query execution context
 * over the precomputed range->query map and runs it.
 *
 * @param diagnosticsFactory scoped diagnostics collector shared with the point reads.
 * @param parentResourceLink query link of the parent collection.
 * @param sqlQuery           placeholder query (real queries come from rangeQueryMap).
 * @param options            query request options.
 * @param klass              result deserialization target type.
 * @param resourceTypeEnum   resource type being queried (Document).
 * @param collection         resolved target collection.
 * @param rangeQueryMap      per-range SQL queries; empty map short-circuits to an empty Flux.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    // Nothing to query — every range had a single item and is handled via point reads.
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }

    UUID activityId = randomUuid();

    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);

    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Serves the readMany ranges that contain exactly one item via point reads, converting
 * each read result — or a plain 404 — into a single-page {@link FeedResponse} so it can
 * be merged uniformly with the query results.
 *
 * @param diagnosticsFactory            scoped diagnostics collector shared with the queries.
 * @param singleItemPartitionRequestMap all ranges (multi-item lists are skipped here).
 * @param resourceLink                  base link the item id is appended to for the read.
 * @param queryRequestOptions           query options translated to point-read options.
 * @param klass                         item deserialization target type.
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {

    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            // Only single-item ranges are handled here; others go through queryForReadMany.
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        // A plain 404 (unknown substatus) simply means "item not present";
                        // surface it as a pair so it can become an empty page downstream.
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;

            if (cosmosException != null) {
                // Missing item -> empty page carrying the 404's headers and diagnostics.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                // Successful read -> single-item page with the read's diagnostics attached.
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());

                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }

            return Mono.just(feedResponse);
        });
}
/**
 * Queries documents with a raw query string; delegates to the {@link SqlQuerySpec}
 * overload after wrapping the text.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}
// Builds an IDocumentQueryClient facade over this client for the document query pipeline:
// cache accessors, retry-policy factory and consistency levels all delegate straight to
// RxDocumentClientImpl.this.
// NOTE(review): the rxDocumentClientImpl parameter is unused - every member references the
// enclosing instance directly; confirm whether the parameter can be dropped.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
// Account-level default consistency, as reported by the gateway configuration reader.
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
// Consistency level configured on this client instance.
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
// A listener is attached: stamp the correlated activity id header onto the request,
// notify the listener before sending, and again on response or error.
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
// NOTE(review): intentionally returns null rather than throwing; confirm that callers of
// this facade never invoke readFeedAsync before relying on that.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
/**
 * Queries documents in a collection with a parameterized {@link SqlQuerySpec}.
 * The query is handed to the spec logger before being dispatched to the shared query pipeline.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    final SqlQuerySpecLogger queryLogger = SqlQuerySpecLogger.getInstance();
    queryLogger.logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Starts a change feed query against the given collection.
 *
 * @param collection the target collection; must not be null.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    final ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/** Adapts a {@code ChangeFeedOperationState} into a plain change feed query. */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    final CosmosChangeFeedRequestOptions changeFeedOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, changeFeedOptions, classOfT);
}
// Reads all documents in one logical partition of a collection by issuing a query scoped
// to that partition key. When more than one region is applicable for speculative
// (availability-strategy) execution, diagnostics are captured through a scoped factory and
// merged back into the request options on every completion path.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the caller's query options so later mutation (partition key range id below)
// does not leak back into caller-owned state.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
// Regions eligible for speculative execution of this query.
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
DiagnosticsClientContext effectiveClientContext;
ScopedDiagnosticsFactory diagnosticsFactory;
if (orderedApplicableRegionsForSpeculation.size() < 2) {
// Fewer than two applicable regions: no speculation, use the client as context.
effectiveClientContext = this;
diagnosticsFactory = null;
} else {
// Speculation possible: capture diagnostics in a scoped factory that the operation
// state can reset and merge across attempts.
diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
effectiveClientContext = diagnosticsFactory;
}
// Request used to resolve the collection and to carry the diagnostics context.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
effectiveClientContext,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// Build a query that scans exactly this logical partition.
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries after refreshing the collection cache when the partition was recreated.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
// Resolve the routing map to locate the physical partition that owns this
// partition key's effective partition key string.
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
// Pin the query to the single owning partition key range.
return createQueryInternal(
effectiveClientContext,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// With speculation enabled, fold the scoped diagnostics back into the request options
// on every completion path: next, error and cancel.
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
/** Returns this client's cache of partitioned query execution plans. */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return this.queryPlanCache;
}
/**
 * Reads the partition key range feed of a collection (operation-state overload).
 *
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedLink);
}
/**
 * Reads the partition key range feed of a collection (query-options overload).
 *
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedLink);
}
/**
 * Validates inputs and builds the service request for a stored-procedure operation.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty or
 *                                  {@code storedProcedure} is null.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Validates inputs and builds the service request for a user-defined-function operation.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty or {@code udf} is null.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/** Creates a stored procedure, retrying per the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a stored procedure in the given collection.
 * <p>
 * Fix: the request is built (which validates {@code collectionLink} and
 * {@code storedProcedure}) before the debug log statement, so a null
 * {@code storedProcedure} surfaces as the intended
 * {@link IllegalArgumentException} instead of a NullPointerException thrown while
 * evaluating {@code storedProcedure.getId()} for the log message.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validates arguments and builds the Create request.
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        // Surface construction/validation failures through the reactive chain.
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Replaces a stored procedure, retrying per the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a stored procedure in place using its self link.
 * Construction failures are surfaced as an error Mono rather than thrown.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }
        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception ex) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Deletes a stored procedure identified by its link. */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a stored procedure by link; errors during request construction are
 * converted into an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String resourcePath = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }
        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception ex) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Reads a stored procedure identified by its link. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a stored procedure by link; errors during request construction are
 * converted into an error Mono.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String resourcePath = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception ex) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Reads the stored-procedure feed of a collection.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedLink);
}
/** Queries stored procedures from a raw query string via the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, state);
}
/** Queries stored procedures with a parameterized {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/** Executes a stored procedure with the given parameter list. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/** Executes a transactional batch request against a collection. */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Executes a stored procedure and maps the service response to a
 * {@link StoredProcedureResponse}, capturing the session token from the response.
 * <p>
 * Fix: the continuation now uses the request emitted by
 * {@code addPartitionKeyInformation} ({@code req}) instead of capturing the outer
 * {@code request} variable, so the partition-key-enriched request is the one that is
 * sent and whose session token is captured.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        // Body is the JSON-serialized parameter list, or empty when there are no params.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> create(req, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Executes a transactional batch request and parses the service response into a
 * {@link CosmosBatchResponse}; construction failures become an error Mono.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    DocumentClientRetryPolicy requestRetryPolicy,
    boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        final Mono<RxDocumentServiceRequest> requestMono = getBatchDocumentRequest(
            requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        return requestMono
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Creates a trigger in a collection, retrying per the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a trigger in the given collection.
 * <p>
 * Fix: the request is built (which validates {@code collectionLink} and {@code trigger})
 * before the debug log statement, so a null {@code trigger} surfaces as the intended
 * {@link IllegalArgumentException} instead of a NullPointerException thrown while
 * evaluating {@code trigger.getId()} for the log message.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validates arguments and builds the Create request.
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds the service request for a trigger operation.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is empty or {@code trigger} is null.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    final String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, resourcePath, trigger, headers, options);
}
/** Replaces a trigger, retrying per the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a trigger in place using its self link; construction failures are
 * surfaced as an error Mono.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        final String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }
        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception ex) {
        logger.debug("Failure in replacing a Trigger due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Deletes a trigger identified by its link. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a trigger by link; errors during request construction are converted
 * into an error Mono.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }
        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception ex) {
        logger.debug("Failure in deleting a Trigger due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Reads a trigger identified by its link. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a trigger by link; errors during request construction are converted
 * into an error Mono.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception ex) {
        logger.debug("Failure in reading a Trigger due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Reads the trigger feed of a collection.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedLink);
}
/** Queries triggers from a raw query string via the {@link SqlQuerySpec} overload. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, state);
}
/** Queries triggers with a parameterized {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/** Creates a user-defined function, retrying per the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a user-defined function in the given collection.
 * <p>
 * Fix: the request is built (which validates {@code collectionLink} and {@code udf})
 * before the debug log statement, so a null {@code udf} surfaces as the intended
 * {@link IllegalArgumentException} instead of a NullPointerException thrown while
 * evaluating {@code udf.getId()} for the log message.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validates arguments and builds the Create request.
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Replaces a user-defined function, retrying per the session-token-reset retry policy. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces a user-defined function in place using its self link; construction
 * failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        final String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }
        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception ex) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Deletes a user-defined function identified by its link. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a user-defined function by link; errors during request construction are
 * converted into an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }
        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                   .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception ex) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/** Reads a user-defined function identified by its link. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a user-defined function by link; errors during request construction are
 * converted into an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception ex) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Reads the user-defined-function feed of a collection.
 *
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedLink);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
String query,
QueryFeedOperationState state) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Reads a conflict resource. Partition-key information is resolved asynchronously
 * before the request is dispatched.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        return reqObs.flatMap(req -> {
            // Use the emitted request (fix: previous code ignored the lambda parameter and
            // closed over the outer variable, relying on in-place mutation by the helper).
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the conflict feed of a collection (non-query read-feed path).
 *
 * @param collectionLink link of the owning collection; must be non-empty.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class,
        Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}

/** Queries conflicts with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   QueryFeedOperationState state) {
    return queryConflicts(collectionLink, new SqlQuerySpec(query), state);
}

/** Queries conflicts with a parameterized query spec via the shared query pipeline. */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Deletes a conflict resource. Partition-key information is resolved asynchronously
 * before the request is dispatched.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        return reqObs.flatMap(req -> {
            // Use the emitted request (fix: previous code ignored the lambda parameter and
            // closed over the outer variable, relying on in-place mutation by the helper).
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}

/**
 * Creates a user under the given database link.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        // Validate before logging: the log line dereferences user.getId(), so a null user
        // previously surfaced as NullPointerException instead of IllegalArgumentException.
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Upserts a user under the given database link.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validate before logging: the log line dereferences user.getId(), so a null user
        // previously surfaced as NullPointerException instead of IllegalArgumentException.
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates the arguments and assembles the service request for a user
 * create/upsert operation under the given database link.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }

    RxDocumentClientImpl.validateResource(user);

    // Target the /users feed of the database and attach per-operation headers.
    String usersFeedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);

    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, usersFeedPath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Fresh retry policy per public call; the same instance drives both preparation and retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicyInstance),
        retryPolicyInstance);
}

/**
 * Replaces a user resource addressed by its self link; synchronous failures
 * become Mono.error.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());

        RxDocumentClientImpl.validateResource(user);

        String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }

        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user resource addressed by its link.
 */
@Override // consistency fix: every sibling interface method carries @Override; this one was missing it
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance =  this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the delete-user request; synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user resource addressed by its link.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // One retry policy instance per public call; it both prepares the request and drives retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the read-user request; synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);

        // Let the retry policy capture per-request state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user feed of a database (non-query read-feed path).
 *
 * @param databaseLink link of the owning database; must be non-empty.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {

    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }

    return nonDocumentReadFeed(state, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}

/** Queries users with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}

/** Queries users with a parameterized query spec via the shared query pipeline. */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads a client encryption key resource addressed by its link.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    // One retry policy instance per public call; it both prepares the request and drives retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the read request; synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }

        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);

        // Let the retry policy capture per-request state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Creates a client encryption key under the given database link.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        // Validate before logging: the log line dereferences clientEncryptionKey.getId(), so a
        // null key previously surfaced as NullPointerException instead of IllegalArgumentException.
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates the arguments and assembles the service request for a client-encryption-key
 * operation under the given database link.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }

    RxDocumentClientImpl.validateResource(clientEncryptionKey);

    // Target the database's client-encryption-key feed and attach per-operation headers.
    String keysFeedPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);

    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, keysFeedPath, clientEncryptionKey, headers, options);
}
/**
 * Replaces a client encryption key addressed by its name-based link.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    // One retry policy instance per public call; it both prepares the request and drives retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
        nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the replace request; synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);

        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);

        // Let the retry policy capture per-request state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the client-encryption-key feed of a database (non-query read-feed path).
 *
 * @param databaseLink link of the owning database; must be non-empty.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {

    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }

    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
        Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}

/** Queries client encryption keys with a parameterized query spec via the shared query pipeline. */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    // Fix: reuse the same retry policy instance for both the internal call and the retry
    // wrapper. The previous code called getRequestPolicy(null) a second time, so the policy
    // that prepared the request was not the one driving retries (every sibling method here
    // passes a single shared instance).
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}

/**
 * Creates a permission under the given user link.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        // Validate before logging: the log line dereferences permission.getId(), so a null
        // permission previously surfaced as NullPointerException instead of IllegalArgumentException.
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Upserts a permission under the given user link.
 * Synchronous validation failures are surfaced as Mono.error.
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validate before logging: the log line dereferences permission.getId(), so a null
        // permission previously surfaced as NullPointerException instead of IllegalArgumentException.
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates the arguments and assembles the service request for a permission
 * create/upsert operation under the given user link.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }

    RxDocumentClientImpl.validateResource(permission);

    // Target the user's /permissions feed and attach per-operation headers.
    String permissionsFeedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);

    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, permissionsFeedPath, permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Fresh retry policy per public call; the same instance drives both preparation and retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicyInstance),
        retryPolicyInstance);
}

/**
 * Replaces a permission addressed by its self link; synchronous failures
 * become Mono.error.
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());

        RxDocumentClientImpl.validateResource(permission);

        String resourcePath = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, resourcePath, permission, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }

        return this.replace(replaceRequest, retryPolicyInstance)
                   .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a permission resource addressed by its link.
 */
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // One retry policy instance per public call; it both prepares the request and drives retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the delete-permission request; synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);

        // Let the retry policy capture per-request state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a permission resource addressed by its link.
 */
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // One retry policy instance per public call; it both prepares the request and drives retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the read-permission request; synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }

        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);

        // Let the retry policy capture per-request state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the permission feed of a user (non-query read-feed path).
 *
 * @param userLink link of the owning user; must be non-empty.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {

    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }

    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
        Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}

/** Queries permissions with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       QueryFeedOperationState state) {
    return queryPermissions(userLink, new SqlQuerySpec(query), state);
}

/** Queries permissions with a parameterized query spec via the shared query pipeline. */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       QueryFeedOperationState state) {
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Fresh retry policy per public call; the same instance drives both preparation and retries.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, documentClientRetryPolicy),
        documentClientRetryPolicy);
}

/**
 * Replaces an offer addressed by its self link; synchronous failures become Mono.error.
 * Offer requests carry no per-request headers or options.
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());

        RxDocumentClientImpl.validateResource(offer);

        String resourcePath = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.Offer, resourcePath, offer, null, null);

        return this.replace(replaceRequest, documentClientRetryPolicy)
                   .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads an offer resource addressed by its link.
 */
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // One retry policy instance per public call; it both prepares the request and drives retries.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Builds and issues the read-offer request; synchronous failures become Mono.error.
 * Offer requests carry no per-request headers or options.
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        // Cast disambiguates the create(...) overload taking a header map.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);

        // Let the retry policy capture per-request state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the account-level offer feed (non-query read-feed path).
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Read-feed entry point for non-document resources; unwraps the query options
 * from the operation state and delegates to the options-based overload.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {

    return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}

/**
 * Wraps the paged read-feed in a session-token-reset retry policy.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
        retryPolicy);
}
/**
 * Drives a continuation-token-paged read-feed for non-document resources.
 * Builds one request per page (propagating the continuation token and page size
 * as headers) and maps each raw response to a typed {@code FeedResponse} page.
 *
 * @param options   query options; a default instance is substituted when null.
 * @param retryPolicy notified before each page request is sent.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink,
    DocumentClientRetryPolicy retryPolicy) {

    final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
    // -1 means "no explicit page size" downstream.
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;

    // Document feeds go through a different (partitioned) pipeline.
    assert(resourceType != ResourceType.Document);

    // Factory invoked once per page by the paginator.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };

    // Executes a page request and converts the raw feed response into typed items,
    // honoring any custom item factory configured on the query options.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
        request -> readFeed(request)
                       .map(response -> toFeedResponsePage(
                           response,
                           ImplementationBridgeHelpers
                               .CosmosQueryRequestOptionsHelper
                               .getCosmosQueryRequestOptionsAccessor()
                               .getItemFactoryMethod(nonNullOptions, klass),
                           klass));

    return Paginator
        .getPaginatedQueryResultAsObservable(
            nonNullOptions,
            createRequestFunc,
            executeFunc,
            maxPageSize);
}
/** Queries offers with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    return queryOffers(new SqlQuerySpec(query), state);
}

/** Queries offers account-wide (null parent link) via the shared query pipeline. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
/**
 * Reads the database account metadata for this client's endpoint.
 */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
        documentClientRetryPolicy);
}

/**
 * Issues the account read (empty resource path addresses the account root);
 * synchronous failures become Mono.error.
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // Cast disambiguates the create(...) overload taking a header map.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "", // end point
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);

    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Returns the session container tracking per-collection session tokens.
// Typed as Object to avoid exposing the internal SessionContainer type.
public Object getSession() {
    return this.sessionContainer;
}

// Replaces the session container; caller must supply a SessionContainer
// (unchecked cast — any other type fails with ClassCastException).
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}

/** Returns the collection metadata cache. */
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}

/** Returns the partition key range cache. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}

/** Returns the global endpoint manager handling regional routing. */
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}

/** Builds a new AddressSelector over this client's address resolver and configured protocol. */
@Override
public AddressSelector getAddressSelector() {
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
/**
 * Reads the {@link DatabaseAccount} from one specific endpoint (used for per-region probing)
 * and refreshes {@code useMultipleWriteLocations} from the returned account properties.
 *
 * @param endpoint the endpoint to route the request to, overriding normal endpoint selection.
 * @return a Flux emitting the resolved database account, or an error if retrieval fails.
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
// Flux.defer so the request is built (and headers populated) per subscription.
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Force this request to the supplied endpoint instead of the globally selected one.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
// Multi-write is only usable if both the client policy and the account allow it.
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through the gateway even when the client connectivity mode
 * is direct (e.g. offers, partition key range reads, most master-resource writes).
 *
 * @param request the service request being routed
 * @return the store model (gateway proxy or direct store model) that should process the request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit gateway mode always wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }

    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();

    // Resource types that are always served by the gateway regardless of the operation.
    if (resourceType == ResourceType.Offer
        || resourceType == ResourceType.ClientEncryptionKey
        || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
        || resourceType == ResourceType.PartitionKeyRange
        || (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete)) {
        return this.gatewayProxy;
    }

    switch (operationType) {
        case Create:
        case Upsert:
            // Master-resource creates/upserts go through the gateway; data-plane goes direct.
            return (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection
                || resourceType == ResourceType.Permission)
                ? this.gatewayProxy : this.storeModel;
        case Delete:
            return (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection)
                ? this.gatewayProxy : this.storeModel;
        case Replace:
        case Read:
            // Collection metadata replace/read is gateway-only.
            return resourceType == ResourceType.DocumentCollection
                ? this.gatewayProxy : this.storeModel;
        default:
            // Cross-partition queries/feeds (no PK range identity and no partition key header)
            // on collection-child resources must be fanned out by the gateway.
            if ((operationType == OperationType.Query
                || operationType == OperationType.SqlQuery
                || operationType == OperationType.ReadFeed)
                && Utils.isCollectionChild(request.getResourceType())
                && request.getPartitionKeyRangeIdentity() == null
                && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
/**
 * Closes this client and releases all owned resources: the global endpoint manager, the
 * store client factory, the HTTP client, the CPU monitor registration and - if enabled -
 * the throughput control store. Idempotent: subsequent invocations only log a warning.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);

        // Null check guards against a concurrent enableThroughputControlGroup(...) that has
        // already flipped throughputControlEnabled but not yet assigned throughputControlStore;
        // without it close() could throw NPE during that window.
        if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }

        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
/**
 * Returns the deserializer used to materialize items from service payloads.
 */
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
/**
 * Registers a throughput control group, lazily creating the {@code ThroughputControlStore}
 * and wiring it into the direct or gateway store model on first use.
 * Synchronized so the store is initialized at most once.
 *
 * @param group the throughput control group to enable; must not be null.
 * @param throughputQueryMono a Mono resolving the configured throughput for the group.
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
// First caller flips the flag and creates/wires the store exactly once.
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
/**
 * Delegates proactive connection warm-up and cache initialization to the store model.
 *
 * @param proactiveContainerInitConfig the containers/regions to warm up.
 * @return a Flux that completes when the open-connection tasks have been submitted.
 */
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
/**
 * Returns the account's default consistency level as reported by the gateway configuration.
 */
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * In direct mode the provider is wired into both the store model and the address resolver;
 * the gateway proxy is configured in all modes (metadata requests always go through it).
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
/**
 * Notifies the store model that proactive connection warm-up finished for the given containers.
 */
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
/**
 * Notifies the store model that proactive connection warm-up started for the given containers.
 */
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
/**
 * Returns the master key or resource token this client was configured with.
 */
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query that scans all documents of one logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 *
 * @param partitionKey the partition key whose value is bound as {@code @pkValue}.
 * @param partitionKeySelector the property path selector (e.g. {@code ["pk"]}) appended after "c".
 * @return the query spec with the partition key value bound as a parameter.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {

    // Bind the partition-key value as a parameter instead of inlining it into the query text.
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));

    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the feed ranges (one per physical partition) for the given collection,
 * retrying on invalid-partition errors caused by stale collection metadata.
 *
 * @param collectionLink the collection self-link or name-based link.
 * @return a Mono emitting the list of feed ranges.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
// A Query/Document request is only used as a carrier for collection resolution.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
/**
 * Resolves the collection, then maps its overlapping partition key ranges
 * (over the full PK range) to feed ranges.
 *
 * @param request carrier request used for collection resolution and diagnostics.
 * @param collectionLink the collection link; must not be empty.
 * @return a Mono emitting the feed ranges for the collection.
 * @throws IllegalArgumentException if {@code collectionLink} is empty.
 */
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// forceRefresh=true so a stale partition map does not yield missing ranges.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts resolved partition key ranges to feed ranges. A null range list indicates stale
 * name-cache state: the request is flagged for a name-cache refresh and an
 * {@link InvalidPartitionException} is thrown so the retry policy can re-resolve.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> pkRanges = partitionKeyRangeListValueHolder.v;
    if (pkRanges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>(pkRanges.size());
    for (PartitionKeyRange pkRange : pkRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
/**
 * Wraps one partition key range's EPK range as a feed range.
 */
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    return randomUuid(random.nextLong(), random.nextLong());
}

/**
 * Stamps the RFC 4122 version-4 / IETF-variant bits onto the supplied 128 random bits.
 */
static UUID randomUuid(long msb, long lsb) {
    // Clear the version nibble and set version 4 (random).
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Clear the two variant bits and set the IETF variant (binary 10xx...).
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
/**
 * Convenience overload that uses this client itself as the diagnostics factory.
 * See the six-argument overload for the availability-strategy semantics.
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
/**
 * Wraps a document point operation with threshold-based cross-region hedging: the operation is
 * started in the primary applicable region immediately and, after staggered delays, in each
 * further applicable region (with all other applicable regions excluded per attempt). The first
 * non-transient result (success or non-transient error) wins; diagnostics from all attempts are
 * merged into the request options' diagnostics context.
 *
 * @param resourceType must be {@link ResourceType#Document}.
 * @param operationType the point operation type.
 * @param callback the operation to execute per region attempt.
 * @param initialRequestOptions caller-supplied options; may be null.
 * @param idempotentWriteRetriesEnabled whether writes may be hedged safely.
 * @param innerDiagnosticsFactory diagnostics factory the scoped factory delegates to.
 * @return a Mono emitting the winning response, or the winning non-transient error.
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two applicable regions -> no hedging; execute directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First attempt spans all regions; only CosmosExceptions are converted into
// non-transient results (everything else propagates as an error).
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempts pin to one region by excluding all other applicable regions.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
// Only non-transient CosmosExceptions win; transient ones keep other attempts alive.
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger subscription: threshold + (attemptIndex - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First emitted value wins; merge diagnostics on every terminal path (value, error, cancel).
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no source emitted a value;
// surface the first inner CosmosException instead.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
/**
 * Returns true when the (reactor-wrapped) throwable unwraps to a {@link CosmosException}.
 */
private static boolean isCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
return unwrappedException instanceof CosmosException;
}
/**
 * Returns true when the throwable unwraps to a {@link CosmosException} whose status/sub-status
 * is considered final for hedging (no other regional attempt should override it).
 */
private static boolean isNonTransientCosmosException(Throwable t) {
    Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = (CosmosException) unwrapped;
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
/**
 * Builds the exclusion list that pins a hedged attempt to {@code currentRegion}: the caller's
 * original exclusions plus every other applicable region.
 *
 * @param initialExcludedRegions the caller-supplied exclusions; may be null.
 * @param applicableRegions regions participating in hedging.
 * @param currentRegion the region this attempt should run in (never added to the result).
 * @return a new mutable list of regions to exclude for this attempt.
 */
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {

    List<String> effectiveExclusions = initialExcludedRegions == null
        ? new ArrayList<>()
        : new ArrayList<>(initialExcludedRegions);

    for (String candidate : applicableRegions) {
        if (!candidate.equals(currentRegion)) {
            effectiveExclusions.add(candidate);
        }
    }

    return effectiveExclusions;
}
/**
 * Decides whether a status/sub-status pair is final for hedging. Success responses (&lt; 400),
 * client-side operation timeouts (408/20008), most 4xx client errors, and 404/0 are final;
 * everything else (e.g. 429, 5xx, 404 with a retryable sub-status) stays transient so another
 * regional attempt may still win.
 */
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Anything below 400 is a successful (or redirect) outcome - always final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }

    switch (statusCode) {
        case HttpConstants.StatusCodes.REQUEST_TIMEOUT:
            // Only the client-side operation timeout flavor of 408 is final.
            return subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;
        case HttpConstants.StatusCodes.BADREQUEST:
        case HttpConstants.StatusCodes.CONFLICT:
        case HttpConstants.StatusCodes.METHOD_NOT_ALLOWED:
        case HttpConstants.StatusCodes.PRECONDITION_FAILED:
        case HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE:
        case HttpConstants.StatusCodes.UNAUTHORIZED:
            return true;
        case HttpConstants.StatusCodes.NOTFOUND:
            // Plain 404 (sub-status UNKNOWN) is final; other 404 sub-statuses are transient.
            return subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
        default:
            return false;
    }
}
/**
 * Returns the override diagnostics context when supplied, otherwise this client itself.
 */
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list, if any.
 *
 * @param operationType the operation type (determines read vs. write endpoint list).
 * @param excludedRegions regions to exclude from endpoint selection; may be null.
 * @return the applicable endpoints with nulls stripped, or an empty list for
 * operations that are neither read-only nor write.
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    }

    if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }

    return EMPTY_ENDPOINT_LIST;
}
/**
 * Strips null entries from the endpoint list in place and returns it
 * (or the shared empty list when the input itself is null).
 *
 * @param orderedEffectiveEndpointsList a mutable endpoint list; may be null.
 * @return the same list with nulls removed, or {@code EMPTY_ENDPOINT_LIST} for null input.
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }

    // removeIf is a single O(n) pass; the previous index-based remove(i) loop was O(n^2)
    // on ArrayList because every removal shifts the tail.
    orderedEffectiveEndpointsList.removeIf(uri -> uri == null);

    return orderedEffectiveEndpointsList;
}
/**
 * Convenience overload that reads the excluded regions from the request options.
 * See the list-based overload for the eligibility rules.
 */
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
/**
 * Determines the ordered list of regions eligible for hedged (speculative) execution.
 * Returns an empty list when hedging does not apply: disabled/missing end-to-end policy,
 * non-document resources, writes without idempotent retries or without multi-write support,
 * or a non-threshold availability strategy.
 *
 * @param endToEndPolicyConfig the effective end-to-end latency policy; may be null.
 * @param resourceType the resource type of the operation.
 * @param operationType the operation type.
 * @param isIdempotentWriteRetriesEnabled whether write hedging is allowed.
 * @param excludedRegions caller-excluded regions; may be null.
 * @return applicable region names in preference order, excluding excluded regions.
 */
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    List<String> excludedRegions) {

    // All guards below are side-effect free, so they can be grouped without changing behavior.
    if (endToEndPolicyConfig == null
        || !endToEndPolicyConfig.isEnabled()
        || resourceType != ResourceType.Document
        || !(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
        return EMPTY_REGION_LIST;
    }

    if (operationType.isWriteOperation()
        && (!isIdempotentWriteRetriesEnabled || !this.globalEndpointManager.canUseMultipleWriteLocations())) {
        return EMPTY_REGION_LIST;
    }

    // Case-insensitive exclusion matching.
    HashSet<String> normalizedExcludedRegions = new HashSet<>();
    if (excludedRegions != null) {
        for (String excludedRegion : excludedRegions) {
            normalizedExcludedRegions.add(excludedRegion.toLowerCase(Locale.ROOT));
        }
    }

    List<String> orderedRegionsForSpeculation = new ArrayList<>();
    for (URI endpointUri : getApplicableEndPoints(operationType, excludedRegions)) {
        String regionName = this.globalEndpointManager.getRegionName(endpointUri, operationType);
        if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
            orderedRegionsForSpeculation.add(regionName);
        }
    }

    return orderedRegionsForSpeculation;
}
/**
 * Wraps a document feed operation with threshold-based cross-region hedging: the operation
 * starts in the primary region immediately and, after staggered delays, in each further
 * applicable region (pinned by excluding all other applicable regions). The first
 * non-transient result wins. Mirrors the point-operation wrapper but without a scoped
 * diagnostics factory (feed diagnostics are handled by the caller).
 *
 * @param resourceType must be {@link ResourceType#Document} (asserted).
 * @param operationType the feed operation type.
 * @param retryPolicyFactory supplies the per-attempt retry policy.
 * @param req the original request; cloned per regional attempt.
 * @param feedOperation the operation to execute per attempt.
 * @return a Mono emitting the winning response, or the winning non-transient error.
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = req
.requestContext
.getEndToEndOperationLatencyPolicyConfig();
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
// Feed operations never hedge writes (isIdempotentWriteRetriesEnabled=false).
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Fewer than two applicable regions -> no hedging; execute directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// First attempt spans all regions; any CosmosException is treated as a result.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempts pin to one region by excluding all other applicable regions.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
// Only non-transient CosmosExceptions win; transient ones keep other attempts alive.
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger subscription: threshold + (attemptIndex - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First emitted value wins; NoSuchElementException from firstWithValue is mapped back to
// the first inner CosmosException when possible.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
/**
 * Callback shape for a single document point operation attempt; invoked once per
 * (possibly hedged) regional execution with attempt-specific options and diagnostics context.
 */
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Either-type for hedged point operations: holds exactly one of a successful response or a
 * non-transient {@link CosmosException}. Used so errors can "win" Mono.firstWithValue races.
 */
private static class NonTransientPointOperationResult {
private final ResourceResponse<Document> response;
private final CosmosException exception;
// Error result: response stays null.
public NonTransientPointOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
// Success result: exception stays null.
public NonTransientPointOperationResult(ResourceResponse<Document> response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public ResourceResponse<Document> getResponse() {
return this.response;
}
}
/**
 * Generic either-type for hedged feed operations: holds exactly one of a successful response
 * of type {@code T} or a non-transient {@link CosmosException}.
 */
private static class NonTransientFeedOperationResult<T> {
private final T response;
private final CosmosException exception;
// Error result: response stays null.
public NonTransientFeedOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
// Success result: exception stays null.
public NonTransientFeedOperationResult(T response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public T getResponse() {
return this.response;
}
}
/**
 * Diagnostics factory that records every {@link CosmosDiagnostics} instance it creates so the
 * diagnostics of all hedged attempts can later be merged (once) into a single diagnostics
 * context. Delegates all creation to the wrapped inner context.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
// Ensures merge() runs at most once per scope (until reset()).
private final AtomicBoolean isMerged = new AtomicBoolean(false);
private final DiagnosticsClientContext inner;
// All diagnostics created through this factory, in creation order.
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
private final boolean shouldCaptureAllFeedDiagnostics;
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
@Override
public CosmosDiagnostics createDiagnostics() {
// Track every created diagnostics instance for the later merge.
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
// Merges using the diagnostics context snapshot from the request options when available.
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
/**
 * Attaches all tracked, not-yet-contextualized, non-empty diagnostics to the target context.
 * The target is the supplied context, or the first tracked diagnostics that already has one.
 * No-op on repeated calls and when no target context can be determined.
 */
public void merge(CosmosDiagnosticsContext knownCtx) {
if (!isMerged.compareAndSet(false, true)) {
return;
}
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
if (ctx == null) {
return;
}
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
// Optionally mark feed diagnostics as captured so paged flux does not re-emit them.
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
// Clears tracked diagnostics and re-arms merge() for reuse of this factory.
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
// --- Core configuration and credentials --------------------------------------
private final Configs configs;
// When true, transport-level resources (HTTP client) are shared across client instances.
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
// Scopes requested for AAD tokens; populated only when tokenCredential is supplied.
private String[] tokenCredentialScopes;
// Caches AAD tokens so a new token is only fetched when the cached one expires.
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
// Resolved during construction: PrimaryMasterKey, ResourceToken, AadToken or Invalid.
AuthorizationTokenType authorizationTokenType;
// --- Session, cache and store state ------------------------------------------
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// resourceId/fullName -> (partition key, resource token) pairs built from a permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
// Flipped once on shutdown; guards against double-close.
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
// Presumably allows wrapping/decorating the gateway HTTP client (e.g. fault injection)
// — NOTE(review): confirm against the setter/caller, not visible in this chunk.
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor taking a permission feed but no {@link TokenCredential}.
 * Delegates to the private permission-feed constructor (passing {@code null} for the
 * token credential) and then installs the optional authorization-token resolver.
 * Parameter order in the delegating call is positional and must match exactly.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Set after delegation: the resolver is not part of the delegated constructor chain.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor taking both a permission feed and a {@link TokenCredential}
 * (AAD authentication). Delegates to the private permission-feed constructor and
 * then installs the optional authorization-token resolver.
 * Parameter order in the delegating call is positional and must match exactly.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Set after delegation: the resolver is not part of the delegated constructor chain.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor handling the permission-feed case. Delegates core
 * initialization to the main constructor, then builds {@code resourceTokensMap}
 * (resourceId/fullName -> list of (partition key, resource token) pairs) from
 * the supplied permissions.
 *
 * @throws IllegalArgumentException if a permission's resource link is empty or
 *         unparseable, or if the resulting token map is empty.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);

    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }

            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }

            // computeIfAbsent replaces the original get / null-check / put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());

            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }

        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }

        // Remember the first real resource token for fallback authorization.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: resolves the authorization mechanism (master key, resource
 * token or AAD token), wires up connection policy, session container, HTTP client,
 * global endpoint manager and retry policy, and registers diagnostics/telemetry
 * metadata for this client instance.
 *
 * <p>On any runtime failure during initialization the partially-constructed
 * client is closed before the exception is rethrown, so no resources leak.</p>
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled,
                     CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                     ApiType apiType,
                     CosmosClientTelemetryConfig clientTelemetryConfig,
                     String clientCorrelationId,
                     CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                     SessionRetryOptions sessionRetryOptions,
                     CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    assert(clientTelemetryConfig != null);
    Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
    assert(clientTelemetryEnabled != null);

    // Track active clients and derive a correlation id (zero-padded client id by default).
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.incrementAndGet();
    this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
        String.format("%05d",this.clientId): clientCorrelationId;
    clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);

    // Diagnostics metadata must be populated before any component that logs through it.
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withClientMap(clientMap);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    this.throughputControlEnabled = new AtomicBoolean(false);
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
    this.sessionRetryOptions = sessionRetryOptions;

    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());

    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;

        // Authorization resolution order: explicit key credential, resource token,
        // raw master key (wrapped into a key credential), then AAD token credential.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // AAD scope is derived from the account endpoint, e.g. "https://<host>/.default".
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default"
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }

        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }

        this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
        this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        this.diagnosticsClientConfig.withMachineId(tempMachineId);
        this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
        this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);

        // Session capturing is disabled unless consistency is SESSION or explicitly overridden.
        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;

        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }

        this.httpClientInterceptor = null;
        this.reactorHttpClient = httpClient();

        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
        this.apiType = apiType;
        this.clientTelemetryConfig = clientTelemetryConfig;
    } catch (RuntimeException e) {
        logger.error("unexpected failure in initializing client.", e);
        // Release anything allocated so far before propagating.
        close();
        throw e;
    }
}
/** Returns the diagnostics configuration captured for this client instance. */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
/**
 * Creates a fresh {@link CosmosDiagnostics} instance, using the sampling rate
 * configured in this client's telemetry config.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
    final double samplingRate = telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig);
    return diagnosticsAccessor.create(this, samplingRate);
}
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Pushes the freshly initialized configuration reader, caches and multi-write
 * flag into the gateway proxy after they become available.
 */
private void updateGatewayProxy() {
    this.gatewayProxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    this.gatewayProxy.setCollectionCache(this.collectionCache);
    this.gatewayProxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    this.gatewayProxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/**
 * Serializes this client's collection cache into the given metadata-caches
 * snapshot so a future client can be bootstrapped without re-fetching metadata.
 *
 * @param state snapshot that receives the serialized collection cache
 */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up direct-mode connectivity: builds the global address resolver, then the
 * store client factory on top of it, and finally the server store model.
 * Order matters — the factory consumes the resolver, and createStoreModel
 * consumes the factory.
 */
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
/**
 * Exposes this client as a {@link DatabaseAccountManagerInternal}, delegating
 * endpoint, connection-policy and database-account lookups back to the
 * enclosing instance.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }

        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }

        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
    };
}
/**
 * Factory for the gateway store model; overridable in tests to substitute a
 * fake transport. Simply forwards every argument to the
 * {@link RxGatewayStoreModel} constructor with this client as diagnostics context.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this, sessionContainer, consistencyLevel, queryCompatibilityMode,
        userAgentContainer, globalEndpointManager, httpClient, apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy. When connection
 * sharing across clients is enabled, a shared singleton instance is returned;
 * otherwise a dedicated client is created and its config recorded in diagnostics.
 */
private HttpClient httpClient() {
    final HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (this.connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
    }

    diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
    return HttpClient.createFixed(httpClientConfig);
}
/**
 * Creates the direct-mode (server) store model from the store client factory.
 *
 * @param subscribeRntbdStatus currently unused in this method body —
 *        NOTE(review): kept for signature compatibility; confirm whether callers
 *        still rely on it.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    final StoreClient newStoreClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations,
        this.sessionRetryOptions);

    this.storeModel = new ServerStoreModel(newStoreClient);
}
/** Returns the account endpoint this client was created with. */
@Override
public URI getServiceEndpoint() {
    return serviceEndpoint;
}
/** Returns the effective connection policy (never null after construction). */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return connectionPolicy;
}
/** Whether write operations return the resource payload in the response. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/** Returns the consistency level configured for this client. */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}
/** Returns the client telemetry component (may be set after construction). */
@Override
public ClientTelemetry getClientTelemetry() {
    return clientTelemetry;
}
/** Returns the correlation id (user supplied, or zero-padded client id). */
@Override
public String getClientCorrelationId() {
    return clientCorrelationId;
}
/**
 * Returns the machine id recorded in the diagnostics config, or {@code null}
 * when no diagnostics config exists.
 */
@Override
public String getMachineId() {
    return this.diagnosticsClientConfig == null ? null : this.diagnosticsClientConfig.getMachineId();
}
/** Returns the full user-agent string (base agent plus any configured suffix). */
@Override
public String getUserAgent() {
    return userAgentContainer.getUserAgent();
}
/**
 * Creates a database, wrapping the operation in a fresh retry policy so that
 * transient failures are retried per client configuration.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create-Database request. Serialization is timed so the
 * duration can be attached to the request's serialization diagnostics. Any
 * synchronous failure is converted into an error Mono rather than thrown.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Capture serialization start/end so diagnostics can report the cost.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
// The retry policy may stamp per-request state before the first attempt.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
// Surface argument/serialization failures through the reactive pipeline.
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database, wrapping the operation in a fresh retry policy so that
 * transient failures are retried per client configuration.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Delete-Database request for the given link.
 * Synchronous validation failures are converted into an error Mono.
 */
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
// The retry policy may stamp per-request state before the first attempt.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a database, wrapping the operation in a fresh retry policy so that
 * transient failures are retried per client configuration.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Read-Database request for the given link.
 * Synchronous validation failures are converted into an error Mono.
 */
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
// The retry policy may stamp per-request state before the first attempt.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Reads all databases in the account as a paged feed. */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link and a child resource type to the feed/query link.
 * Account-rooted types (Database, Offer) ignore the parent link; all others are
 * joined onto the parent with the type's path segment.
 *
 * @throws IllegalArgumentException for resource types without a query feed
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String childPathSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childPathSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childPathSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childPathSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childPathSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childPathSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childPathSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childPathSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childPathSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childPathSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childPathSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childPathSegment);
}
/**
 * Extracts the operation context/listener tuple from query options;
 * returns {@code null} for null options.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
/**
 * Extracts the operation context/listener tuple from request options;
 * returns {@code null} for null options.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Convenience overload: runs the query using this client itself as the
 * diagnostics factory.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {

    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
/**
 * Core query entry point: resolves the feed link, picks/propagates the
 * correlation activity id, wraps execution in an invalid-partition retry policy,
 * and merges diagnostics from the scoped factory back into the operation state
 * on every terminal path (success, error, cancellation).
 */
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
// Reuse the caller-provided activity id when present; otherwise generate one.
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
// Shared flag so the timeout path can mark the query as cancelled.
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
// Scoped factory isolates diagnostics per operation; reset/merge hooks let the
// operation state pull them in at the right time.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
// Merge diagnostics on each emitted page.
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
// Also merge on failure so partial diagnostics are not lost.
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
/**
 * Builds the query execution context and maps its pages into feed responses,
 * attaching query-plan info to the first response and query info to every
 * response that has a select value. When an end-to-end latency policy is
 * enabled the resulting flux is wrapped with a timeout.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
// Query-plan diagnostics are only attached to the very first page.
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
QueryInfo queryInfo = null;
// Only pipelined contexts expose query info (select value, plan diagnostics).
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Merges the diagnostics of all requests cancelled by a query timeout into a
 * single {@link CosmosDiagnostics} instance and attaches it to the exception.
 *
 * <p>BUG FIX: the second statistics lookup previously read from {@code first}
 * again instead of {@code toBeMerged}, so the reduce step merged each
 * diagnostics entry with itself and all but the first entry's statistics were
 * dropped. It now reads from {@code toBeMerged}.</p>
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregatedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);

                    // Fixed: fetch from toBeMerged (was incorrectly 'first').
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);

                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get(); // safe: list verified non-empty above

        BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
    }
}
/**
 * Wraps a feed-response flux with the end-to-end operation timeout. On timeout
 * the flux fails with a CosmosException carrying the merged diagnostics of the
 * cancelled requests, and the shared cancellation flag is set.
 *
 * NOTE(review): in the negative-timeout branch the flux is still given the
 * negative duration via {@code .timeout(endToEndTimeout)} — presumably this is
 * intended to trip the timeout immediately so the dedicated negative-timeout
 * exception is surfaced; confirm Reactor's behavior for negative durations.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
Flux<FeedResponse<T>> feedResponseFlux,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
CosmosQueryRequestOptions requestOptions,
final AtomicBoolean isQueryCancelledOnTimeout) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
// Negative timeouts get a dedicated exception type.
CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
cancellationException.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
return cancellationException;
}
return throwable;
});
}
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
// Normal timeout: surface as OperationCancelledException with merged diagnostics.
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnostics(requestOptions, exception);
return exception;
}
return throwable;
});
}
/** Queries databases with a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return this.queryDatabases(new SqlQuerySpec(query), state);
}
/** Queries databases at the account root using the given query spec. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return this.createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a collection in the given database, wrapping the operation in a
 * fresh retry policy so that transient failures are retried.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    final DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
/**
 * Builds and issues the Create-Collection request. Serialization is timed for
 * diagnostics, and on success the session token from the response is recorded
 * in the session container. Synchronous failures become an error Mono.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Capture serialization start/end so diagnostics can report the cost.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
// The retry policy may stamp per-request state before the first attempt.
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Record the session token for the newly created collection.
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    // Build a per-request retry policy and run the replace with inline-retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
// Core replace implementation: validates the collection, serializes it (capturing
// serialization diagnostics), issues the Replace request and propagates the
// session token from the response into the session container.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
// Replace addresses the collection via its self link.
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Time the payload serialization so it can be reported in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
// Diagnostics context only exists once the request object is created, so attach afterwards.
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
// Record the session token so subsequent session-consistency reads observe this write.
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
// Synchronous failures (validation, serialization) are surfaced as an error Mono.
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    // Build a per-request retry policy and run the delete with inline-retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Core delete implementation: validates the link, builds the Delete request and maps
// the raw service response to a typed resource response.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection,
            Utils.joinPath(collectionLink, null), requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous validation failures are surfaced as an error Mono.
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// DELETE verb dispatch: headers are populated lazily on subscription, the retry
// end-time is stamped on retry attempts, then the store proxy processes the message.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populated).processMessage(populated, operationContextAndListenerTuple);
        });
}
// Partition-wide delete is issued as a POST; the store proxy is resolved after
// headers are populated, and the retry end-time is stamped on retry attempts.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel proxy = this.getStoreProxy(populated);
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populated, operationContextAndListenerTuple);
        });
}
// GET verb dispatch: populate headers on subscription, stamp retry end-time on
// retry attempts, then hand the message to the resolved store proxy.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populated).processMessage(populated);
        });
}
// Feed read (GET): headers are populated lazily, then the request is dispatched.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populated -> getStoreProxy(populated).processMessage(populated));
}
// Query dispatch (POST): after the response arrives, its session token is captured
// so session-consistency bookkeeping stays current.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> this.getStoreProxy(populated)
            .processMessage(populated)
            .map(response -> {
                this.captureSessionToken(populated, response);
                return response;
            }));
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    // Build a per-request retry policy and run the read with inline-retry semantics.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Core read implementation: validates the link, builds the Read request and maps
// the raw service response to a typed resource response.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection,
            Utils.joinPath(collectionLink, null), requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Synchronous validation failures are surfaced as an error Mono.
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    // Collections are enumerated as a metadata feed under <databaseLink>/colls.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String collectionsPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsPath);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and run it as a collection query.
    return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Collection queries run as DocumentCollection resource queries against the database link.
    return createQuery(
        databaseLink,
        querySpec,
        state,
        DocumentCollection.class,
        ResourceType.DocumentCollection);
}
// Serializes stored-procedure parameters into a JSON array literal, e.g. ["a",1,{"k":2}].
// JsonSerializable values use the model bridge; everything else goes through the shared mapper.
private static String serializeProcedureParams(List<Object> objectArray) {
    List<String> serialized = new ArrayList<>(objectArray.size());
    for (Object object : objectArray) {
        if (object instanceof JsonSerializable) {
            serialized.add(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object));
        } else {
            try {
                serialized.add(mapper.writeValueAsString(object));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Assembles the per-request HTTP headers from client defaults and the supplied RequestOptions:
// multi-write tentative-writes flag, consistency level, content-response preference, etags,
// triggers, session token, offer/throughput settings, quota/script-logging flags and dedicated
// gateway cache options. Options-specific values override client-level defaults.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
// Without options, only the client-level content-response preference applies.
if (options == null) {
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
// Custom headers from the options are applied first so the explicit settings below win.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request content-response preference overrides the client default when set.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if (options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
// Pre/post triggers are sent as comma-separated lists.
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Explicit offer throughput takes precedence over a named offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties is only consulted when no explicit offer throughput was given.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
// A fixed (manual) offer must not also carry autoscale settings.
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
// Dedicated gateway (integrated cache) per-request options.
if (options.getDedicatedGatewayRequestOptions() != null) {
if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
}
}
return headers;
}
// Accessor for the client's per-request retry-policy factory.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection (for its partition-key definition) and then stamps the
// partition key onto the request, yielding the same request instance.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Document document,
    RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request)
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}
// Variant taking an already-started collection resolution: once the collection is available,
// derive and attach the partition key, then yield the same request instance.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object document,
    RequestOptions options,
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
// Determines the effective partition key for a request and writes it both onto the request
// object and into the PARTITION_KEY header. Resolution priority:
//   1. explicit PartitionKey.NONE in options,
//   2. explicit partition key in options,
//   3. empty key for collections without a partition-key definition,
//   4. extraction from the document payload,
//   5. otherwise the operation is unsupported without a key.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collections without a partition key definition use the empty key.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
// Extract the key from the document payload, normalizing it to an InternalObjectNode first.
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// The extraction is timed and recorded in serialization diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the service request for a document write (Create/Upsert/...): serializes the payload
// (capturing serialization diagnostics and an optional tracking id), applies headers and
// per-request settings, then resolves the collection so the partition key can be attached.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Serialize the document, threading through the tracking id (if any) for write retries.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Non-idempotent write retries only apply to write operations and must be opted into.
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Resolve the collection so the partition key can be derived and attached to the request.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: serializes the batch body (recording
 * serialization diagnostics), applies request headers and the excluded-regions preference,
 * then resolves the target collection so batch routing headers can be attached.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent; may be null.
 * @param documentCollectionLink link of the target collection; must be non-empty.
 * @param serverBatchRequest batch payload; must not be null.
 * @param options optional request options.
 * @param disableAutomaticIdGeneration unused here; kept for signature parity with the other
 *                                     document request builders.
 * @return a Mono emitting the fully prepared request once the collection is resolved.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // Time the payload serialization for request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    // Apply the excluded-regions preference exactly once (previously this was set twice).
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    // Resolve the collection so batch routing headers (partition key / PK range) can be attached.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
// Attaches batch routing and batch-control headers to the request. Single-partition batches
// are routed by partition key; partition-key-range batches by PK range identity. Also stamps
// the IS_BATCH_REQUEST / atomicity / continue-on-error headers and the operation count.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// PartitionKey.NONE maps to the collection-specific "none" key.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key/resource-token/custom-resolver auth is computed synchronously here; AAD auth is
// applied later via populateAuthorizationHeader.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The token is URL-encoded before being placed in the header.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Default content-type/accept headers; callers' explicit headers are preserved.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range-filtered requests need extra routing headers before authorization is finalized.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
// Advertises the SDK's supported capabilities unless the caller already set the header.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
// True only for document/conflict feed reads and queries that carry a feed range;
// everything else is routed without feed-range filtering.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery = operationType == OperationType.ReadFeed
        || operationType == OperationType.Query
        || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Only AAD tokens are resolved asynchronously here; key/resource-token auth was
    // already applied when the headers were populated.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    // Non-AAD auth needs no async token resolution; return the headers untouched.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    // Simple accessor for the client's configured auth mode.
    return authorizationTokenType;
}
// Computes the authorization token for a request. Precedence:
//   1. a custom token resolver, 2. a key credential, 3. a single resource token used as-is,
//   4. the resource-tokens map (with a special case for DatabaseAccount reads).
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is returned verbatim.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// DatabaseAccount reads use the first token obtained from the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
// Maps a wire-format resource type to the public CosmosResourceType;
// unknown/internal types fall back to SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token carried by a response into the session container so later
// session-consistency requests can replay it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// POST create dispatch: headers are populated lazily on subscription, the retry
// end-time is stamped on retry attempts, then the store proxy processes the message.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
    DocumentClientRetryPolicy documentClientRetryPolicy,
    OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            RxStoreModel proxy = this.getStoreProxy(populated);
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populated, operationContextAndListenerTuple);
        });
}
// Upsert is a POST with the IS_UPSERT header set; the response session token is captured
// before the response is handed back.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
    DocumentClientRetryPolicy documentClientRetryPolicy,
    OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populated -> {
            Map<String, String> headers = populated.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populated)
                .processMessage(populated, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populated, response);
                    return response;
                });
        });
}
// PUT replace dispatch: populate headers on subscription, stamp retry end-time on
// retry attempts, then hand the message to the resolved store proxy.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populated).processMessage(populated);
        });
}
/**
 * Issues a PATCH request, recording retry timing on retried attempts.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetriedRequest =
                documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedRequest) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Creates a document, routing through the availability strategy so cross-region
 * hedging / end-to-end timeout policies can apply to the point operation.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    // Non-idempotent write retries are honored only when explicitly enabled on the options.
    boolean retryNonIdempotentWrites = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (requestOptions, e2eConfig, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            requestOptions,
            disableAutomaticIdGeneration,
            e2eConfig,
            clientCtxOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Builds the retry-policy stack for a create and executes it. When no explicit
 * partition key was supplied, the policy is wrapped in a partition-key-mismatch
 * retry so stale cached collection metadata can be refreshed and retried.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Builds the create request, applies the optional end-to-end timeout, and maps the
 * raw service response to a typed {@code ResourceResponse<Document>}.
 * Synchronous failures while building the request surface as {@code Mono.error}.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride)
            .flatMap(request -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                request,
                endToEndPolicyConfig,
                create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Wraps a response Mono with the end-to-end operation timeout when the policy is
 * enabled; negative timeouts fail fast with a dedicated cancellation exception.
 */
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
    RxDocumentServiceRequest request,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono) {

    // No policy or disabled policy -> pass the pipeline through untouched.
    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        return rxDocumentServiceResponseMono;
    }
    Duration e2eTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (e2eTimeout.isNegative()) {
        return Mono.error(getNegativeTimeoutException(request, e2eTimeout));
    }
    request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
    return rxDocumentServiceResponseMono
        .timeout(e2eTimeout)
        .onErrorMap(failure -> getCancellationException(request, failure));
}
/**
 * Translates a Reactor timeout into an {@link OperationCancelledException} carrying the
 * request diagnostics; any other failure (or a request without context) passes through.
 */
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
    Throwable unwrapped = reactor.core.Exceptions.unwrap(throwable);
    if (!(unwrapped instanceof TimeoutException)) {
        return throwable;
    }
    CosmosException cancellation = new OperationCancelledException();
    // Preserve the original stack so the cancellation points at the timed-out call site.
    cancellation.setStackTrace(throwable.getStackTrace());
    if (request.requestContext == null) {
        return throwable;
    }
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
/**
 * Builds the cancellation exception raised when a negative end-to-end timeout is
 * configured, tagging it with the dedicated sub-status code and, when possible,
 * the request diagnostics.
 */
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");

    CosmosException exception =
        new OperationCancelledException(String.format("Negative timeout '%s' provided.", negativeTimeout), null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (request == null || request.requestContext == null) {
        return exception;
    }
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
/**
 * Upserts a document, routing through the availability strategy so cross-region
 * hedging / end-to-end timeout policies can apply to the point operation.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    boolean retryNonIdempotentWrites = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (requestOptions, e2eConfig, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, requestOptions, disableAutomaticIdGeneration, e2eConfig, clientCtxOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Builds the retry-policy stack for an upsert and executes it. Without an explicit
 * partition key, the policy is wrapped so partition-key mismatches caused by stale
 * collection metadata are retried.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Builds the upsert request, applies the optional end-to-end timeout, and maps the
 * raw service response to a typed {@code ResourceResponse<Document>}.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        return getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
                options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride)
            .flatMap(request -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                request,
                endToEndPolicyConfig,
                upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces the document at {@code documentLink}, routing through the availability
 * strategy so hedging / end-to-end timeout policies apply.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    boolean retryNonIdempotentWrites = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (requestOptions, e2eConfig, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            requestOptions,
            e2eConfig,
            clientCtxOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Builds the retry-policy stack for a replace-by-link and executes it. Without an
 * explicit partition key, the collection name is derived from the document link and
 * the policy is wrapped to retry partition-key mismatches.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, Utils.getCollectionName(documentLink), options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            documentLink,
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Validates the replace-by-link arguments, converts the caller-supplied object into a
 * typed {@link Document}, and delegates to the typed replace overload.
 *
 * @throws IllegalArgumentException (as Mono.error) when documentLink is empty or document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // FIX: pass the throwable as the last argument so the stack trace is logged,
        // consistent with createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its own self-link, routing through the
 * availability strategy so hedging / end-to-end timeout policies apply.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    boolean retryNonIdempotentWrites = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (requestOptions, e2eConfig, clientCtxOverride) -> replaceDocumentCore(
            document,
            requestOptions,
            e2eConfig,
            clientCtxOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Builds the retry-policy stack for a replace-by-document and executes it. Without an
 * explicit partition key, the document's self-link is used for partition-key-mismatch
 * retry handling.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, document.getSelfLink(), options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Validates the replace-by-document arguments and delegates to the link-based replace
 * overload using the document's self-link.
 *
 * @throws IllegalArgumentException (as Mono.error) when document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // FIX: the message previously said "replacing a database" on the document-replace
        // path (copy/paste error); also pass the throwable so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core replace implementation: serializes the document (timed for serialization
 * diagnostics), builds the service request, resolves collection/partition-key
 * information, and dispatches with an optional end-to-end timeout.
 *
 * NOTE(review): statement order is significant here — the serialization timestamps
 * must bracket serializeJsonToByteBuffer, and the tracking-id mutation must happen
 * before serialization. Kept byte-identical; comments only.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    Instant serializationStartTimeUTC = Instant.now();
    // Stamp the tracking id onto the document body BEFORE serialization so it is
    // part of the payload sent to the service.
    if (options != null) {
        String trackingId = options.getTrackingId();
        if (trackingId != null && !trackingId.isEmpty()) {
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    // Give the retry policy a chance to pin region/endpoint state before dispatch.
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);
    // NOTE(review): the lambda dispatches the outer 'request' rather than 'req';
    // presumably addPartitionKeyInformation mutates and returns the same instance —
    // confirm before changing.
    return requestObs.flatMap(req -> {
        Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class));
        return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
    });
}
/**
 * Resolves the effective end-to-end latency policy for a request, preferring the
 * per-request options over the client-level default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null;
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Falls back to the client-wide end-to-end latency policy when no request-level
 * policy was provided.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies patch operations to a document, routing through the availability strategy
 * so hedging / end-to-end timeout policies apply.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    boolean retryNonIdempotentWrites = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (requestOptions, e2eConfig, clientCtxOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            requestOptions,
            e2eConfig,
            clientCtxOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Executes the patch with a session-token-reset retry policy. Unlike create/replace,
 * no partition-key-mismatch wrapper is installed on this path.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(
            documentLink,
            cosmosPatchOperations,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Core patch implementation: serializes the patch operations (timed for serialization
 * diagnostics), builds the PATCH request, resolves collection/partition-key
 * information, and dispatches with an optional end-to-end timeout.
 *
 * NOTE(review): order-sensitive (serialization timestamps bracket the patch
 * serialization). Kept byte-identical; comments only.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    Instant serializationStartTimeUTC = Instant.now();
    // The request body is the serialized list of patch operations, not a document.
    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    // NOTE(review): this passes clientContextOverride directly, while the replace path
    // wraps it in getEffectiveClientContext(...) — confirm whether intentional.
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    // No content/document is passed here: the partition key must come from options.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);
    // NOTE(review): dispatches the outer 'request' rather than 'req'; presumably the
    // same mutated instance — confirm before changing.
    Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
        Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
        return getRxDocumentServiceResponseMonoWithE2ETimeout(
            request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
    });
    return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document by link (no concurrency-check document body), routing through
 * the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    boolean retryNonIdempotentWrites = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, e2eConfig, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            null,
            requestOptions,
            e2eConfig,
            clientCtxOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Deletes a document by link, supplying the current item snapshot so partition-key
 * information can be derived from it, routing through the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    boolean retryNonIdempotentWrites = options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, e2eConfig, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            requestOptions,
            e2eConfig,
            clientCtxOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Executes a delete with a session-token-reset retry policy.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Core delete implementation: builds the DELETE request, resolves collection and
 * partition-key information (optionally from the supplied item snapshot), and
 * dispatches with an optional end-to-end timeout.
 *
 * @throws IllegalArgumentException (as Mono.error) when documentLink is empty
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        // Give the retry policy a chance to pin region/endpoint state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
        // The item snapshot (when present) supplies the partition key value.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(req -> {
                Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
                    .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
                return getRxDocumentServiceResponseMonoWithE2ETimeout(
                    request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
            });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // FIX: pass the throwable as the last argument so the stack trace is logged,
        // consistent with createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes all documents sharing a logical partition key via the service-side
 * partition-key-delete operation, using a session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core partition-key-delete implementation: builds a DELETE request against the
 * PartitionKey resource type and resolves partition-key routing from the options.
 *
 * @throws IllegalArgumentException (as Mono.error) when collectionLink is empty
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        // Partition key comes from options; no document body is involved.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // FIX: pass the throwable as the last argument so the stack trace is logged,
        // consistent with createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a document by link, using this client as the diagnostics factory.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return this.readDocument(documentLink, options, this);
}
/**
 * Reads a document by link with an explicit diagnostics factory, routing through the
 * availability strategy. Reads are idempotent, so non-idempotent-write retries are
 * always disabled on this path.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (requestOptions, e2eConfig, clientCtxOverride) ->
            readDocumentCore(documentLink, requestOptions, e2eConfig, clientCtxOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
/**
 * Executes a read with a session-token-reset retry policy.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
/**
 * Core read implementation: builds the READ request, resolves collection and
 * partition-key information, and dispatches with an optional end-to-end timeout.
 *
 * @throws IllegalArgumentException (as Mono.error) when documentLink is empty
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // BUGFIX: 'options' is nullable on this path (getRequestHeaders above tolerates
        // null) but getExcludeRegions was dereferenced unconditionally, causing an NPE
        // when no RequestOptions were supplied. Every sibling operation guards this.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        // Pass the throwable so the stack trace is logged (consistent with siblings).
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection, implemented as an unfiltered query.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String readAllQuery = "SELECT * FROM r";
    return queryDocuments(collectionLink, readAllQuery, state, classOfT);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs and returns them as a
 * single aggregated FeedResponse. Items are grouped by physical partition; groups with
 * a single item are issued as point reads while multi-item groups are issued as IN/OR
 * queries; the results, request charges, query metrics and diagnostics are then merged.
 *
 * NOTE(review): large and intricate (diagnostics merging, error mapping); kept
 * byte-identical, comments only.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {
    // Scoped factory collects per-sub-request diagnostics; merged into the caller's
    // context on completion via the registered callback.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );
    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    // Dummy query request used only to resolve the collection metadata.
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }
            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    // Bucket each identity into the physical partition range that owns
                    // its effective partition key.
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // Hierarchical partition keys must supply a value for every
                            // defined path.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                    .getComponents().size() != pkDefinition.getPaths().size()) {
                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });
                    // Only multi-item buckets get a query; single-item buckets are
                    // presumably handled by pointReadsForReadMany below — see getRangeQueryMap.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);
                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));
                    // Merge all partial feed pages into one synthetic FeedResponse,
                    // aggregating charges, query metrics and client-side statistics.
                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }
                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }
                            CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                // Record the whole readMany as a single 200 operation.
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }
                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                })
                .onErrorMap(throwable -> {
                    // On failure, still record the operation against the caller's
                    // diagnostics context before propagating the CosmosException.
                    if (throwable instanceof CosmosException) {
                        CosmosException cosmosException = (CosmosException)throwable;
                        CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                        if (diagnostics != null) {
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    cosmosException.getStatusCode(),
                                    cosmosException.getSubStatusCode(),
                                    0,
                                    cosmosException.getRequestCharge(),
                                    diagnostics,
                                    throwable
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        diagnostics,
                                        state.getDiagnosticsContextSnapshot());
                            }
                        }
                        return cosmosException;
                    }
                    return throwable;
                });
        }
    );
}
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    // Builds one SQL query per partition-key range. Ranges holding a single
    // requested item are intentionally skipped - those are served by point reads.
    Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    String pkSelector = createPkSelector(partitionKeyDefinition);
    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> rangeEntry : partitionRangeItemKeyMap.entrySet()) {
        List<CosmosItemIdentity> identities = rangeEntry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges go through the point-read path
        }
        final SqlQuerySpec querySpec;
        if (pkSelector.equals("[\"id\"]")) {
            // Partition key path is "/id": id and partition key carry the same value.
            querySpec = createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            querySpec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            querySpec = createReadManyQuerySpec(identities, pkSelector);
        }
        queriesByRange.put(rangeEntry.getKey(), querySpec);
    }
    return queriesByRange;
}
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    // When the partition key path is "/id", the id doubles as the partition key,
    // so a parameterized "c.id IN (...)" filter is sufficient.
    // NOTE: partitionKeySelector is unused here but kept for signature parity
    // with the sibling readMany query builders.
    List<SqlParameter> params = new ArrayList<>();
    StringBuilder sql = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    int count = idPartitionKeyPairList.size();
    for (int idx = 0; idx < count; idx++) {
        String paramName = "@param" + idx;
        params.add(new SqlParameter(paramName, idPartitionKeyPairList.get(idx).getId()));
        sql.append(paramName);
        if (idx != count - 1) {
            sql.append(", ");
        }
    }
    sql.append(" )");
    return new SqlQuerySpec(sql.toString(), params);
}
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    // Builds "( (c.id = @p1 AND c<pkSelector> = @p0) OR ... )", contributing two
    // parameters per item: the partition key at even indices, the id at odd ones.
    List<SqlParameter> params = new ArrayList<>();
    StringBuilder sql = new StringBuilder("SELECT * FROM c WHERE ( ");
    int total = itemIdentities.size();
    for (int idx = 0; idx < total; idx++) {
        CosmosItemIdentity identity = itemIdentities.get(idx);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        String pkParam = "@param" + (2 * idx);
        String idParam = "@param" + (2 * idx + 1);
        params.add(new SqlParameter(pkParam, pkValue));
        params.add(new SqlParameter(idParam, identity.getId()));
        sql.append("(")
            .append("c.id = ")
            .append(idParam)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParam)
            .append(" )");
        if (idx != total - 1) {
            sql.append(" OR ");
        }
    }
    sql.append(" )");
    return new SqlQuerySpec(sql.toString(), params);
}
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {
    // Builds a disjunction of per-item filters for hierarchical (multi-hash)
    // partition keys: each item contributes
    // "(c.id = @p AND c.<path1> = @p AND c.<path2> = @p ...)".
    // The serialized partition-key string uses '=' as the sub-key separator.
    List<SqlParameter> params = new ArrayList<>();
    StringBuilder sql = new StringBuilder("SELECT * FROM c WHERE ( ");
    List<String> pkPaths = partitionKeyDefinition.getPaths();
    int paramIndex = 0;
    int total = itemIdentities.size();
    for (int idx = 0; idx < total; idx++) {
        CosmosItemIdentity identity = itemIdentities.get(idx);
        String serializedPk =
            (String) ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        // (path, parameter-name) pairs, one per sub partition key component.
        List<List<String>> pathParamPairs = new ArrayList<>();
        int pathIndex = 0;
        for (String subKeyValue : serializedPk.split("=")) {
            String pkParamName = "@param" + paramIndex;
            paramIndex++;
            pathParamPairs.add(Arrays.asList(pkPaths.get(pathIndex), pkParamName));
            params.add(new SqlParameter(pkParamName, subKeyValue));
            pathIndex++;
        }
        String idParamName = "@param" + paramIndex;
        paramIndex++;
        params.add(new SqlParameter(idParamName, identity.getId()));
        sql.append("(").append("c.id = ").append(idParamName);
        for (List<String> pair : pathParamPairs) {
            // pair.get(0) is a path such as "/state"; drop the leading '/'.
            sql.append(" AND ")
                .append(" c.")
                .append(pair.get(0).substring(1))
                .append(" = ")
                .append(pair.get(1));
        }
        sql.append(" )");
        if (idx != total - 1) {
            sql.append(" OR ");
        }
    }
    sql.append(" )");
    return new SqlQuerySpec(sql.toString(), params);
}
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    // Converts each partition-key path (e.g. "/pk") into a quoted index selector
    // ("[\"pk\"]") and concatenates the selectors in path order.
    // NOTE(review): embedded double quotes are replaced by a single backslash,
    // which looks like a truncated escape sequence - confirm this is intended.
    StringBuilder selector = new StringBuilder();
    for (String path : partitionKeyDefinition.getPaths()) {
        String withoutLeadingSlash = StringUtils.substring(path, 1);
        String escaped = StringUtils.replace(withoutLeadingSlash, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    // Executes the per-range readMany queries. An empty map means every requested
    // item is served by point reads, so there is nothing to query here.
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }
    final UUID correlationActivityId = randomUuid();
    final AtomicBoolean queryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContexts =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            correlationActivityId,
            klass,
            resourceTypeEnum,
            queryCancelledOnTimeout);
    return executionContexts.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Serves readMany requests for partition-key ranges that contain exactly one
 * requested item by issuing a point read per item (instead of a SQL query), and
 * wraps each outcome in a single-item {@code FeedResponse} so the results can be
 * merged with the query-based pages.
 *
 * A 404 with sub-status UNKNOWN (plain "not found") is tolerated: it becomes an
 * empty feed page that still carries the failed request's headers and
 * diagnostics. Any other error is propagated after unwrapping reactive
 * composite exceptions.
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {
    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            // Only ranges with exactly one requested item are point-read;
            // anything else is expected to go through the query path.
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                // Pair the outcome with a nullable CosmosException so the "not
                // found" case can flow through the same downstream mapping.
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            // Missing items are a legal readMany outcome - keep
                            // the exception so its diagnostics can be attached.
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;
            if (cosmosException != null) {
                // "Not found": emit an empty page that keeps the response
                // headers and client-side request statistics of the failure.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                // Success: convert the point-read response into a one-item page.
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }
            return Mono.just(feedResponse);
        });
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    // Convenience overload: wrap the raw query text and delegate.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Creates the {@code IDocumentQueryClient} facade the query pipeline uses, backed
 * by this client's caches, retry policies and transport. When an
 * operation-context/listener tuple is supplied, query requests are stamped with
 * the correlated activity id and reported to the listener.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default consistency as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Client-configured consistency override (may differ from the account default).
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Correlate the request with the operation context and notify the
                // listener around the request/response/error lifecycle.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);
                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
            // Delegate straight to the outer client's availability-strategy logic.
            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): intentionally(?) unimplemented - returns null rather
            // than an error Mono; confirm no query code path ever calls this.
            return null;
        }
    };
}
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    // Log the query text before handing it to the generic query pipeline.
    SqlQuerySpecLogger queryLogger = SqlQuerySpecLogger.getInstance();
    queryLogger.logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    // ChangeFeedQueryImpl owns the paging/continuation mechanics; we only supply
    // the collection identity (alt link + resource id) and the options.
    ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    // The operation state already carries the effective change feed options.
    CosmosChangeFeedRequestOptions effectiveOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, effectiveOptions, classOfT);
}
/**
 * Reads all documents of a single logical partition by running a
 * partition-scoped scan query (built by {@code createLogicalPartitionScanQuerySpec})
 * against the physical partition-key range that owns the given partition key.
 *
 * When the end-to-end latency policy makes more than one region applicable
 * (speculative/availability-strategy execution), a {@code ScopedDiagnosticsFactory}
 * is used so diagnostics from the speculative attempts can be merged back into
 * the operation state on success, error and cancellation.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty or
 *         {@code partitionKey} is null
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }
    // Clone the caller's options: the range id is written into the options
    // below (setPartitionKeyRangeIdInternal) and must not leak back.
    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());
    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);
    DiagnosticsClientContext effectiveClientContext;
    ScopedDiagnosticsFactory diagnosticsFactory;
    // Fewer than two applicable regions means no speculation: diagnostics can go
    // straight to this client context without a scoped factory.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        effectiveClientContext = this;
        diagnosticsFactory = null;
    } else {
        diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
        effectiveClientContext = diagnosticsFactory;
    }
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        effectiveClientContext,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );
    // Resolve the collection first - the partition key definition is needed to
    // compute the effective partition key and build the scan query.
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
        // Retries on "invalid partition" refresh the collection cache and retry
        // with an up-to-date routing picture.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }
                    // Map the logical partition key to the single physical range
                    // that owns it, then scope the query to that range.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                    return createQueryInternal(
                        effectiveClientContext,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);
        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }
        // Speculative execution: merge scoped diagnostics back into the request
        // options on every terminal path (next page, error, cancellation).
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    // Exposes the client's shared query-plan cache.
    return this.queryPlanCache;
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Partition key ranges are exposed as a non-document feed under the collection.
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Options-based overload of the partition-key-range feed read.
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    // Validate inputs, then build a request addressed at the collection's
    // stored-procedures feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, feedPath, storedProcedure, headers, options);
}
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    // Validate inputs, then build a request addressed at the collection's
    // user-defined-functions feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, feedPath, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a stored procedure in the given collection, applying the supplied
 * retry policy to the outgoing request. Errors (including argument validation
 * failures) are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validate before logging: the log statement dereferences storedProcedure,
        // so a null argument must fail with IllegalArgumentException (matching the
        // sibling replace/delete methods and getStoredProcedureRequest), not NPE.
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        // Replace addresses the resource's own self link.
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Stored procedures are exposed as a non-document feed under the collection.
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Convenience overload: wrap the raw query text and delegate.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Stored procedures share the generic query pipeline.
    return createQuery(
        collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Executes a stored procedure via an ExecuteJavaScript request. The procedure
 * parameters (if any) are JSON-serialized into the request body; the partition
 * key is resolved asynchronously before the request is sent, and the response's
 * session token is captured on success.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        // Stored procedure execution responses are JSON payloads.
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            // Empty body when there are no parameters; otherwise serialize them.
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        // Populate partition-key information on the request before sending.
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda ignores 'req' and reuses the outer 'request';
        // this is only equivalent if addPartitionKeyInformation emits the same
        // mutated instance - confirm before changing either reference.
        return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    DocumentClientRetryPolicy requestRetryPolicy,
    boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        // Build the transactional batch request, send it, and parse the
        // service response back into a CosmosBatchResponse.
        Mono<RxDocumentServiceRequest> requestMono = getBatchDocumentRequest(
            requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        return requestMono
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a trigger in the given collection, applying the supplied retry
 * policy to the outgoing request. Errors (including argument validation
 * failures) are surfaced as an error Mono rather than thrown synchronously.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Validate before logging: the log statement dereferences trigger, so a
        // null argument must fail with IllegalArgumentException (matching
        // replaceTriggerInternal and getTriggerRequest), not NPE.
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    // Validate inputs, then build a request addressed at the collection's
    // triggers feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, feedPath, trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Wrap the internal call so it is retried per the session-token reset policy.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        // Replace addresses the resource's own self link.
        String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(triggerLink)) {
throw new IllegalArgumentException("triggerLink");
}
logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
String path = Utils.joinPath(triggerLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
} catch (Exception e) {
logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
QueryFeedOperationState state) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
UserDefinedFunction udf, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
udf.getId());
RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
OperationType.Create);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (udf == null) {
throw new IllegalArgumentException("udf");
}
logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
validateResource(udf);
String path = Utils.joinPath(udf.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(udfLink)) {
throw new IllegalArgumentException("udfLink");
}
logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
String path = Utils.joinPath(udfLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
} catch (Exception e) {
logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
String query,
QueryFeedOperationState state) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class,
Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
QueryFeedOperationState state) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
String nameBasedLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
String databaseLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
String databaseLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null));
}
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Core implementation of deletePermission: validates the link, builds the Delete
// request and hands it to the retry-aware delete pipeline (forwarding any operation
// context/listener attached to the request options).
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
// Give the retry policy a chance to inspect/adjust the request before sending.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Return the error through the Mono instead of throwing synchronously.
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // A single retry policy instance is shared between the inline attempt and any retries.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation of readPermission: validates the link, builds the Read
// request and hands it to the retry-aware read pipeline.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
// Give the retry policy a chance to inspect/adjust the request before sending.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
// Return the error through the Mono instead of throwing synchronously.
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    // Permissions can only be enumerated underneath a user resource.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    String permissionsFeedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, permissionsFeedLink);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, state);
}
// Queries permissions under the given user link using the shared query pipeline.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // A single retry policy instance is shared between the inline attempt and any retries.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
// Core implementation of replaceOffer: validates the offer, builds the Replace
// request addressed by the offer's self link (no extra headers/options) and hands
// it to the retry-aware replace pipeline.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
// Return the error through the Mono instead of throwing synchronously.
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // A single retry policy instance is shared between the inline attempt and any retries.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Core implementation of readOffer: validates the link, builds the Read request
// (no extra headers) and hands it to the retry-aware read pipeline.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
// Give the retry policy a chance to inspect/adjust the request before sending.
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
// Return the error through the Mono instead of throwing synchronously.
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads the feed of all offers (throughput resources) for the account.
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
// Convenience overload: extracts the query options from the operation state and
// delegates to the options-based read-feed implementation.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
QueryFeedOperationState state,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}
/**
 * Reads a feed of non-document resources, wiring the paginated internal
 * implementation into the retry-aware flux helper.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    // One retry policy instance is shared across all pages of the feed.
    DocumentClientRetryPolicy clientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, clientRetryPolicy),
        clientRetryPolicy);
}
// Paginated ReadFeed implementation for non-document resources (databases, users,
// permissions, offers, ...). Builds one ReadFeed request per page, threading the
// continuation token through the request headers; the Paginator drives the pages.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 lets the service choose the page size.
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
// Document feeds must go through the query/change-feed pipelines instead.
assert(resourceType != ResourceType.Document);
// Creates the request for a single page; continuationToken/pageSize come from the Paginator.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
// Executes one page request and converts the raw response into a typed feed page.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, state);
}
// Queries offers account-wide (no parent link) using the shared query pipeline.
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // A single retry policy instance is shared between the inline attempt and any retries.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
// Core implementation of getDatabaseAccount: issues a Read against the account
// root ("" path, no headers) and maps the raw response to a DatabaseAccount.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
// Return the error through the Mono instead of throwing synchronously.
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Exposes the session container that tracks session tokens for this client.
public Object getSession() {
return this.sessionContainer;
}
// Replaces the session container. The argument must be a SessionContainer;
// any other type makes the cast throw ClassCastException.
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
// Returns the client-side collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
// Returns the client-side partition key range cache.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
// Returns the global endpoint manager used for regional endpoint resolution.
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
// Builds a fresh AddressSelector over this client's address resolver on every call.
@Override
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the database account information from a specific endpoint (bypassing normal
// endpoint resolution via setEndpointOverride). Also refreshes the cached
// multi-write flag from the returned account. Deferred so the request is built per subscription.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
// Multi-write is usable only when both the client policy and the account allow it.
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request to route
 * @return RxStoreModel - either the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// An explicit per-request override always wins.
if (request.useGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Resource types that are gateway-only regardless of operation
// (scripts are gateway-only except for ExecuteJavaScript).
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
// Metadata create/upsert/delete/replace/read go through gateway; document
// operations go direct.
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Cross-partition queries/feeds without a pinned partition key (range) must
// be fanned out by the gateway.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Shuts the client down exactly once: releases the endpoint manager, store client
 * factory, HTTP client, CPU monitor registration and - if enabled - the throughput
 * control store. Subsequent calls only log a warning.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // Null-check guards against closing while enableThroughputControlGroup is
        // mid-flight: the enabled flag can be observed as true before the store
        // reference is visible, which previously caused an NPE here.
        if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
// Returns the deserializer used to materialize items from raw responses.
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
// Registers a throughput control group. On the first call this lazily creates the
// ThroughputControlStore and wires it into either the direct store model or the
// gateway proxy depending on the connection mode; later calls only add the group.
// Synchronized so concurrent first calls cannot race the store initialization.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Delegates proactive connection warm-up / cache initialization to the store model.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
// Returns the account's default consistency level as reported by the gateway configuration.
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * The gateway proxy is always configured; the direct store model and address
 * resolver are only configured when the client runs in DIRECT connection mode.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Notifies the store model that proactive connection warm-up has completed for the containers.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
// Notifies the store model that proactive connection warm-up has started for the containers.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
// Returns the credential (master key or resource token) this client was built with.
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 * The partition key value is bound as a parameter rather than inlined.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
// Resolves the feed ranges (one per physical partition) of a collection. Wrapped in
// an InvalidPartitionExceptionRetryPolicy so stale partition metadata triggers a
// cache refresh and retry instead of failing the call.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
// Core implementation of getFeedRanges: resolves the collection from cache, then
// fetches all overlapping partition key ranges and converts them to feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// Query the full key range so every physical partition is covered.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
/**
 * Converts a list of partition key ranges into feed ranges. A null list means the
 * cached collection metadata is stale: the request is flagged for a name-cache
 * refresh and an InvalidPartitionException is thrown so the retry policy can re-resolve.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> partitionKeyRanges = partitionKeyRangeListValueHolder.v;
    if (partitionKeyRanges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    final List<FeedRange> feedRanges = new ArrayList<>(partitionKeyRanges.size());
    for (PartitionKeyRange pkRange : partitionKeyRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a partition key range's EPK range as a feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 * Delegates to {@link #randomUuid(long, long)} which stamps the version/variant bits.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
}
// Builds a version-4 UUID from two raw 64-bit values by stamping the RFC 4122 bits.
static UUID randomUuid(long msb, long lsb) {
    // Force the version nibble (bits 12-15 of msb) to 0b0100 -> version 4 (random).
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Force the variant bits (top two bits of lsb) to 0b10 -> IETF variant.
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: uses this client as the diagnostics factory and delegates
// to the full availability-strategy wrapper below.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
// Wraps a document point operation with the threshold-based availability strategy
// (cross-region hedging): the operation is started in the primary region immediately
// and in each additional applicable region after a growing delay; the first
// non-transient result (success or terminal error) wins. With fewer than two
// applicable regions the callback is invoked directly with no hedging overhead.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two candidate regions -> no hedging; run the operation directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
// Scoped factory collects diagnostics from all speculative attempts so they can be
// merged back into the caller-visible request options.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First mono: unrestricted attempt across all regions; any CosmosException
// (transient or not) is captured as a candidate result.
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempts: pin each to a single region by excluding every other
// applicable region, and only capture non-transient errors as results.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Each later region starts after threshold + (i-1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First mono that emits a value wins; its result is unwrapped back into a
// response or error after merging the collected diagnostics.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no mono produced a value;
// dig out the first real CosmosException from the composite cause.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the throwable (after unwrapping reactive wrappers) is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// True when the throwable (after unwrapping) is a CosmosException whose status/sub-status
// marks it as a non-transient (final) result for hedging purposes.
private static boolean isNonTransientCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
if (!(unwrappedException instanceof CosmosException)) {
return false;
}
CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
return isNonTransientResultForHedging(
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode());
}
/**
 * Computes the exclusion list that pins a hedged attempt to {@code currentRegion}:
 * the caller's original exclusions plus every other applicable region.
 */
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    final List<String> effectiveExcludedRegions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();
    for (String applicableRegion : applicableRegions) {
        if (applicableRegion.equals(currentRegion)) {
            continue;
        }
        effectiveExcludedRegions.add(applicableRegion);
    }
    return effectiveExcludedRegions;
}
/**
 * Decides whether a status/sub-status pair is a final (non-transient) result for
 * hedging: success codes, client operation timeouts, deterministic 4xx errors and
 * plain 404s all end the race; everything else may be improved by another region.
 */
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Any non-error status is final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // Client-side operation timeout is treated as final.
    boolean isClientOperationTimeout =
        statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
            && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;
    // Deterministic 4xx results - retrying in another region cannot change them.
    boolean isDeterministicClientError =
        statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;
    // A plain 404 (sub-status UNKNOWN) is a definitive "not found".
    boolean isPlainNotFound =
        statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
    return isClientOperationTimeout || isDeterministicClientError || isPlainNotFound;
}
// Returns the supplied diagnostics context override, or this client when none is given.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any.
 * Read-only operations use the read endpoints, write operations the write endpoints;
 * any other operation kind yields an empty list.
 *
 * @param operationType the operation type
 * @param excludedRegions regions to exclude from endpoint selection
 * @return the applicable endpoints ordered by preference list if any
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes null entries from the endpoint list in place and returns the same list;
 * a null input yields the shared empty list.
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // Single-pass removeIf replaces the previous index-based remove loop,
    // which was O(n^2) on an ArrayList.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload: extracts the excluded regions from the request options and
// delegates to the list-based implementation.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
// Determines the ordered list of regions eligible for speculative (hedged) execution.
// Returns the empty list whenever hedging must not happen: policy missing/disabled,
// non-document resource, non-idempotent writes, single-write-region accounts, or a
// non-threshold-based availability strategy.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Exclusion comparison is case-insensitive via lower-casing both sides.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
endpoints.forEach(uri -> {
// NOTE(review): assumes getRegionName never returns null for an applicable
// endpoint - a null here would NPE on toLowerCase; confirm with GlobalEndpointManager.
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
// Feed-operation counterpart of the point-operation availability strategy: starts the
// feed operation in the primary region immediately and in each additional applicable
// region after a growing delay; the first non-transient result wins. With fewer than
// two applicable regions the operation runs directly without hedging.
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Fewer than two candidate regions -> no hedging; run the operation directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
// Each speculative attempt operates on its own clone of the request.
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// First mono: unrestricted attempt; any CosmosException is captured as a result.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempts: pin to one region by excluding the other applicable
// regions, and only capture non-transient errors as results.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Each later region starts after threshold + (i-1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First mono that emits a value wins; unwrap it back into a response or error.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no mono produced a value;
// dig out the first real CosmosException from the composite cause.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
// Functional hook representing a single document point operation (create/replace/read/delete style call).
// NOTE(review): presumably invoked per region by the availability-strategy hedging path, mirroring the
// feed-operation lambda used above - confirm against callers.
@FunctionalInterface
private interface DocumentPointOperation {
// Executes the operation; clientContextOverride lets the caller scope diagnostics capture to this attempt.
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Outcome wrapper for a point operation: holds exactly one of a successful
 * {@code ResourceResponse} or a non-transient {@code CosmosException}, never both.
 */
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    /** Failure outcome - wraps a non-transient error. */
    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Success outcome - wraps the service response. */
    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** @return true when this outcome carries an exception. */
    public boolean isError() {
        return null != this.exception;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
/**
 * Outcome wrapper for a feed operation of result type {@code T}: holds exactly one of a
 * successful response or a non-transient {@code CosmosException}, never both.
 * Field names are read directly by the hedging pipeline above - keep them stable.
 */
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    /** Failure outcome - wraps a non-transient error. */
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Success outcome - wraps the feed response. */
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** @return true when this outcome carries an exception. */
    public boolean isError() {
        return null != this.exception;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
/**
 * DiagnosticsClientContext decorator that records every CosmosDiagnostics it hands out so
 * they can later be merged into a single CosmosDiagnosticsContext exactly once.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Ensures the merge happens at most once even under concurrent callers.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    // Diagnostics instances created through this factory, in creation order.
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Track every instance so it can be merged into the final context later.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }

    /**
     * Merges captured diagnostics into the diagnostics-context snapshot of the given
     * request options when available; otherwise falls back to scanning captured diagnostics.
     */
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            // Fix: reuse the single snapshot read; the original fetched it, null-checked it,
            // then invoked the getter a second time for the assignment.
            knownCtx = requestOptions.getDiagnosticsContextSnapshot();
        }
        merge(knownCtx);
    }

    public void merge(CosmosDiagnosticsContext knownCtx) {
        // Only the first caller performs the merge; later calls are no-ops.
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            // No context supplied - adopt the first captured diagnostics that already has one.
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            return;
        }
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as already captured so the paged flux won't surface them again.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    /** Clears captured diagnostics and re-arms the merge guard for reuse. */
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} |
Capturing session tokens in that case is fine. Whether Session consistency is used depends on two things: the effective consistency level of a request, and whether a session token is present (either customer-provided or stored in the session token container). Setting client-level consistency to Session while the account-level consistency is Eventual was possible before as well. | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
// Allow tests/callers to wrap or replace the gateway HTTP client before any wiring happens.
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
// Build the gateway store model first - metadata caches below issue requests through it.
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// Warm-start the collection cache from a serialized snapshot when one was provided.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
// Push the freshly built caches/config into the gateway proxy.
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
// GATEWAY mode reuses the gateway proxy as the store model; DIRECT builds RNTBD connectivity.
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// Effective consistency falls back to the account default when no client-level value was set.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
// Session capturing stays enabled only for effective SESSION consistency or an explicit override.
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
// Initialization failed: release what was partially constructed, then rethrow.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
// Optional interceptor lets the caller wrap the gateway HTTP client (e.g. for fault injection).
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
// The gateway proxy must exist before the metadata caches, which route requests through it.
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
// Seed the collection cache from the snapshot when available to skip cold-start lookups.
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
// Wire the caches/config into the gateway proxy now that they exist.
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
// Store model selection: gateway proxy for GATEWAY mode, direct (RNTBD) stack otherwise.
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
// When no client-level consistency was configured, use the account's default.
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
// Disable session-token capture unless effective consistency is SESSION or explicitly overridden.
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
// Clean up partially initialized state before propagating the failure.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// --- Shared constants and cross-SDK bridge accessors (static) ---
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
// Process-wide client bookkeeping: machine id, active-client counter and per-endpoint client map.
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// --- Immutable per-client configuration (set once in the constructor) ---
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// --- Authentication state (key, resource token, or AAD token credential - mutually exclusive) ---
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// --- Mutable runtime state populated during init() ---
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
// Throughput control is off until enableThroughputControl-style configuration flips it.
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor variant without a TokenCredential: delegates to the private
 * permission-feed constructor (passing null for the TokenCredential) and then stores
 * the optional CosmosAuthorizationTokenResolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// The resolver is set after delegation because the delegated constructor does not accept it.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor variant that also accepts an AAD TokenCredential: delegates to the
 * private permission-feed constructor and then stores the optional
 * CosmosAuthorizationTokenResolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// The resolver is set after delegation because the delegated constructor does not accept it.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Delegating constructor that additionally builds the resource-token map from a permission feed.
 * For each permission, the resource link is parsed and the (partition key, token) pair is
 * registered under the resource id/full name. Throws IllegalArgumentException on an empty
 * resource link, an unparsable link, or a feed that yields no tokens.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the manual get/null-check/put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first genuine resource token for later authorization fallbacks.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: wires authentication (key / resource token / AAD credential), connection
 * policy, session container, global endpoint manager and retry policy. Network-dependent
 * initialization is deferred to init().
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled,
                     CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                     ApiType apiType,
                     CosmosClientTelemetryConfig clientTelemetryConfig,
                     String clientCorrelationId,
                     CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                     SessionRetryOptions sessionRetryOptions,
                     CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    assert(clientTelemetryConfig != null);
    Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
    assert(clientTelemetryEnabled != null);
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.incrementAndGet();
    this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
        String.format("%05d", this.clientId) : clientCorrelationId;
    // Fix: merge() is an atomic read-modify-write on the shared ConcurrentHashMap;
    // the previous getOrDefault+put could lose increments under concurrent client creation.
    clientMap.merge(serviceEndpoint.toString(), 1, Integer::sum);
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withClientMap(clientMap);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    this.throughputControlEnabled = new AtomicBoolean(false);
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
    this.sessionRetryOptions = sessionRetryOptions;
    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;
        // Authentication selection: explicit AzureKeyCredential, resource token,
        // master key (wrapped into a credential), or AAD TokenCredential - in that order.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if (masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                this.tokenCredentialScopes = new String[] {
                    // NOTE(review): this literal was truncated in this copy (comment stripping
                    // swallowed everything after "://"); reconstructed as the AAD scope
                    // "<scheme>://<host>/.default" - verify against upstream.
                    serviceEndpoint.getScheme() + "://" + serviceEndpoint.getHost() + "/.default"
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }
        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }
        this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
        this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        this.diagnosticsClientConfig.withMachineId(tempMachineId);
        this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        // Initial session-capturing decision from the client-level consistency;
        // re-evaluated in init() once the account default is known.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;
        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }
        this.httpClientInterceptor = null;
        this.reactorHttpClient = httpClient();
        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
        this.apiType = apiType;
        this.clientTelemetryConfig = clientTelemetryConfig;
    } catch (RuntimeException e) {
        // Release partially acquired resources before propagating the failure.
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}
// Exposes the per-client diagnostics configuration assembled in the constructor.
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
// Creates a diagnostics instance using the telemetry config's current sampling rate.
@Override
public CosmosDiagnostics createDiagnostics() {
return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/** Pushes the freshly initialized caches and gateway configuration into the gateway store model. */
private void updateGatewayProxy() {
    RxGatewayStoreModel proxy = this.gatewayProxy;
    proxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    proxy.setCollectionCache(this.collectionCache);
    proxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    proxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes this client's collection cache into the given snapshot (used for warm-start of new clients).
public void serialize(CosmosClientMetadataCachesSnapshot state) {
RxCollectionCache.serialize(state, this.collectionCache);
}
// Builds the DIRECT-mode stack: global address resolver, RNTBD store client factory,
// and the server store model. Only called when connection mode is not GATEWAY.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
// Adapts this client to the DatabaseAccountManagerInternal interface consumed by the
// GlobalEndpointManager; all calls delegate back to the enclosing client instance.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; package-private/overridable so tests can substitute a fake.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy; returns the JVM-shared
 * instance when connection sharing across clients is enabled.
 */
private HttpClient httpClient() {
    HttpClientConfig config = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (connectionSharingAcrossClientsEnabled) {
        // Shared singleton across all clients in this process.
        return SharedGatewayHttpClient.getOrCreateInstance(config, diagnosticsClientConfig);
    }
    // Dedicated client: record its configuration in diagnostics before creating it.
    diagnosticsClientConfig.withGatewayHttpClientConfig(config.toDiagnosticsString());
    return HttpClient.createFixed(config);
}
// Creates the direct-mode store client and wraps it in a ServerStoreModel.
// NOTE(review): the subscribeRntbdStatus parameter is not used anywhere in this body - confirm
// whether it is vestigial or consumed in a version of this method not visible here.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
// --- Simple accessors over constructor/init state ---
@Override
public URI getServiceEndpoint() {
return this.serviceEndpoint;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return this.connectionPolicy;
}
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
// Returns the client-level consistency; may be null when the account default is in effect.
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
@Override
public ClientTelemetry getClientTelemetry() {
return this.clientTelemetry;
}
@Override
public String getClientCorrelationId() {
return this.clientCorrelationId;
}
// Null-safe: diagnostics config may be absent when the client failed early in construction.
@Override
public String getMachineId() {
if (this.diagnosticsClientConfig == null) {
return null;
}
return this.diagnosticsClientConfig.getMachineId();
}
@Override
public String getUserAgent() {
return this.userAgentContainer.getUserAgent();
}
// Creates a database, wrapping the call in a retry policy obtained from the
// session-token-reset factory so transient failures are retried.
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create-Database request; serialization timings are recorded
// into the request's diagnostics. Any synchronous failure is converted to Mono.error.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Measure payload serialization for diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
// Let the retry policy observe the request before it is sent (e.g. to pin endpoints).
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes the database addressed by the given link.
 *
 * @param databaseLink self-link or alt-link of the database to delete.
 * @param options      request options; may be {@code null}.
 * @return a {@link Mono} emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    // A fresh retry policy per operation so retry state is never shared across calls.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, retryPolicy),
        retryPolicy);
}
// Performs one attempt of the database-delete call under the supplied retry policy.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        // Normalize the link into a request path (no extra segment appended).
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        // Let the retry policy capture per-request state before the request is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the database addressed by the given link.
 *
 * @param databaseLink self-link or alt-link of the database to read.
 * @param options      request options; may be {@code null}.
 * @return a {@link Mono} emitting the read response.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    // A fresh retry policy per operation so retry state is never shared across calls.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, retryPolicy),
        retryPolicy);
}
// Performs one attempt of the database-read call under the supplied retry policy.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        // Normalize the link into a request path (no extra segment appended).
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        // Let the retry policy capture per-request state before the request is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of all databases in the account.
 *
 * @param state query/feed operation state carrying options and diagnostics hooks.
 * @return a {@link Flux} of database feed pages.
 */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    // Databases live directly under the service root, so the feed link is fixed.
    final String feedLink = Paths.DATABASES_ROOT;
    return nonDocumentReadFeed(state, ResourceType.Database, Database.class, feedLink);
}
/**
 * Maps a parent resource link plus a resource type to the feed link that queries of
 * that type must be issued against.
 *
 * @param parentResourceLink link of the parent resource (ignored for root-scoped types).
 * @param resourceTypeEnum   the resource type being queried.
 * @return the query feed link.
 * @throws IllegalArgumentException for resource types that cannot be queried this way.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    // Root-scoped types have fixed links; everything else is a child segment of the parent.
    final String childSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childSegment);
}
// Extracts the operation context/listener pair from query options; null options carry none.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
// Extracts the operation context/listener pair from request options; null options carry none.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Convenience overload of {@code createQuery} that uses this client itself as the
 * diagnostics client context.
 *
 * @param parentResourceLink link of the parent resource of the queried feed.
 * @param sqlQuery           the query to execute.
 * @param state              feed operation state (options, diagnostics hooks).
 * @param klass              item type of the result feed.
 * @param resourceTypeEnum   resource type being queried.
 * @return a {@link Flux} of feed pages.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {

    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
// Builds the full query pipeline: resolves the feed link, sets up correlation-activity
// tracking, wraps execution in an invalid-partition retry policy, and merges scoped
// diagnostics back into the operation state on completion, error, or cancellation.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
    // Honor a caller-provided correlation activity id; otherwise generate one per query.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();
    // Shared flag so the timeout path can signal cancellation to the execution context.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
    // Retries the whole query if the cached partition map turned out to be stale.
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
    // Scoped factory isolates this query's diagnostics; state controls reset/merge timing.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);
    // Merge accumulated diagnostics on every terminal path: next page, error, and cancel.
    return
        ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy
        ).flatMap(result -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Creates the query execution context(s) and adapts their output: attaches query-plan
// info/diagnostics to responses and, when an end-to-end latency policy is enabled,
// wraps the feed in a timeout.
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);

    // Query-plan diagnostics are only attached to the first page of the feed.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        // Only pipelined contexts expose QueryInfo (select-value projection, plan diagnostics).
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }

        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }

                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });

        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);

        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
        }

        return feedResponseFlux;
        // Concurrency SMALL_BUFFER_SIZE with prefetch 1 limits eager page buffering.
    }, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Folds the diagnostics of all requests that were cancelled (e.g. by the end-to-end
 * timeout) into a single {@link CosmosDiagnostics} and attaches it to the exception
 * that will surface to the caller.
 *
 * @param requestOptions query options carrying the cancelled-request diagnostics tracker.
 * @param exception      the exception the merged diagnostics are attached to.
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregatedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);

                    // BUGFIX: read the statistics of 'toBeMerged' here. The previous code
                    // passed 'first' again, which merged the accumulator's own snapshot
                    // into itself and silently dropped every other request's diagnostics.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);

                    if (clientSideRequestStatistics == null) {
                        // Accumulator has no stats yet; adopt the other side wholesale.
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get(); // safe: list verified non-empty above, so reduce yields a value

        BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
    }
}
// Applies the end-to-end operation timeout to a feed flux, translating Reactor's
// TimeoutException into a Cosmos cancellation exception that carries the merged
// diagnostics of the cancelled requests.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout) {

    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();

    // NOTE(review): a negative configured timeout is mapped to a dedicated
    // "negative timeout" exception rather than OperationCancelledException;
    // the .timeout(negative) is expected to fire immediately — confirm intent.
    if (endToEndTimeout.isNegative()) {
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    // Signal the execution context that the query was cancelled.
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
                    return cancellationException;
                }
                return throwable;
            });
    }

    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                CosmosException exception = new OperationCancelledException();
                exception.setStackTrace(throwable.getStackTrace());
                // Signal the execution context that the query was cancelled.
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnostics(requestOptions, exception);
                return exception;
            }
            return throwable;
        });
}
/**
 * Queries databases using a raw query string.
 *
 * @param query the SQL query text.
 * @param state feed operation state (options, diagnostics hooks).
 * @return a {@link Flux} of database feed pages.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    // A plain string is just a parameterless query spec.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, state);
}
/**
 * Queries databases using a parameterized query spec.
 *
 * @param querySpec the SQL query specification.
 * @param state     feed operation state (options, diagnostics hooks).
 * @return a {@link Flux} of database feed pages.
 */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Database queries are scoped to the service root.
    final String parentLink = Paths.DATABASES_ROOT;
    return createQuery(parentLink, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a collection inside the given database.
 *
 * @param databaseLink link of the owning database.
 * @param collection   the collection definition to create.
 * @param options      request options; may be {@code null}.
 * @return a {@link Mono} emitting the create response.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    // A fresh retry policy per operation so retry state is never shared across calls.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
// Performs one attempt of the collection-create call under the supplied retry policy,
// and records the returned session token so subsequent session reads can use it.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        // Rejects ids containing illegal characters or ending with a space.
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the JSON serialization so it can be reported in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        // Let the retry policy capture per-request state before the request is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Cache the session token keyed by both RID and alt-link of the new collection.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing collection definition.
 *
 * @param collection the collection (with self-link) to replace.
 * @param options    request options; may be {@code null}.
 * @return a {@link Mono} emitting the replace response.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    // A fresh retry policy per operation so retry state is never shared across calls.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
// Performs one attempt of the collection-replace call under the supplied retry policy,
// updating the cached session token when the response carries the resource.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        // Rejects ids containing illegal characters or ending with a space.
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the JSON serialization so it can be reported in request diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        // Let the retry policy capture per-request state before the request is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Resource may be absent (e.g. minimal responses); only then skip the token update.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes the collection addressed by the given link.
 *
 * @param collectionLink self-link or alt-link of the collection to delete.
 * @param options        request options; may be {@code null}.
 * @return a {@link Mono} emitting the delete response.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    // A fresh retry policy per operation so retry state is never shared across calls.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Performs one attempt of the collection-delete call under the supplied retry policy.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        // Normalize the link into a request path (no extra segment appended).
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        // Let the retry policy capture per-request state before the request is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Sends a DELETE through the store proxy after populating auth/session headers.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            // On retries, record when this attempt actually reached the transport layer.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Delete-by-partition-key is submitted to the backend as a POST operation.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            // On retries, record when this attempt actually reached the transport layer.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Sends a GET through the store proxy after populating auth/session headers.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            // On retries, record when this attempt actually reached the transport layer.
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated);
        });
}
// Feed reads are plain GETs; no retry-context bookkeeping happens at this level.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            return storeProxy.processMessage(requestPopulated);
        });
}
// Sends a query as a POST and captures the response's session token before
// handing the response downstream.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated)
                .processMessage(requestPopulated)
                .doOnNext(response -> this.captureSessionToken(requestPopulated, response)));
}
/**
 * Reads the collection addressed by the given link.
 *
 * @param collectionLink self-link or alt-link of the collection to read.
 * @param options        request options; may be {@code null}.
 * @return a {@link Mono} emitting the read response.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    // A fresh retry policy per operation so retry state is never shared across calls.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Performs one attempt of the collection-read call under the supplied retry policy.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        // Normalize the link into a request path (no extra segment appended).
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        // Let the retry policy capture per-request state before the request is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        // Surface synchronous failures through the reactive pipeline instead of throwing.
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the feed of all collections inside the given database.
 *
 * @param databaseLink link of the owning database; must be non-empty.
 * @param state        feed operation state (options, diagnostics hooks).
 * @return a {@link Flux} of collection feed pages.
 * @throws IllegalArgumentException if {@code databaseLink} is empty.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // The collections feed hangs off the database link.
    final String feedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, feedLink);
}
/**
 * Queries collections of a database using a raw query string.
 *
 * @param databaseLink link of the owning database.
 * @param query        the SQL query text.
 * @param state        feed operation state (options, diagnostics hooks).
 * @return a {@link Flux} of collection feed pages.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               QueryFeedOperationState state) {
    // A plain string is just a parameterless query spec.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Queries collections of a database using a parameterized query spec.
 *
 * @param databaseLink link of the owning database.
 * @param querySpec    the SQL query specification.
 * @param state        feed operation state (options, diagnostics hooks).
 * @return a {@link Flux} of collection feed pages.
 */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array string.
 *
 * @param objectArray the parameter values (JsonSerializable or arbitrary POJOs).
 * @return a JSON array literal, e.g. {@code ["a",1,{"k":"v"}]}.
 * @throws IllegalArgumentException if a parameter cannot be serialized to JSON.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    List<String> serialized = new ArrayList<>(objectArray.size());
    for (Object object : objectArray) {
        if (object instanceof JsonSerializable) {
            // SDK model types know how to render themselves.
            serialized.add(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object));
        } else {
            try {
                serialized.add(mapper.writeValueAsString(object));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return "[" + String.join(",", serialized) + "]";
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Builds the HTTP header map for a request: starts from client-level defaults
// (multi-write, consistency), then layers on every header-affecting setting from
// RequestOptions. Custom headers from options are applied first so the explicitly
// typed options below can override them.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();

    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }

    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }

    if (options == null) {
        // No per-request options: only the client-level content-response preference applies
        // (documents-only, write operations only).
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }

    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }

    // Per-request setting overrides the client-level default when present.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }

    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }

    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }

    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }

    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }

    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }

    // Trigger lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }

    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }

    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }

    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }

    // Explicit offer throughput wins over offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }

    // ThroughputProperties path: translate manual vs. autoscale settings into headers,
    // rejecting a mix of fixed throughput and autoscale configuration.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;

            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }

            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }

            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }

    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }

    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }

    // Dedicated gateway (integrated cache) knobs.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }

    return headers;
}
// Exposes the retry-policy factory used by all public operations in this client.
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection from the cache, then stamps the request with the
// partition key derived from the document/options (Document overload).
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    return this.collectionCache
        .resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request)
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}
// Same as the Document overload, but the collection resolution is supplied by the
// caller as an already-started Mono.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
// Determines the effective partition key for a request and writes it both into the
// request object and the x-ms-documentdb-partitionkey header. Precedence:
// explicit PartitionKey.NONE > explicit options PK > empty PK definition > extraction
// from the document body; otherwise the operation is unsupported without a PK.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                        ByteBuffer contentAsByteBuffer,
                                        Object objectDoc, RequestOptions options,
                                        DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();

    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Caller explicitly targeted the "none" partition.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection has no partition key definition; route with the empty key.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the key value from the document body, wrapping it as an
        // InternalObjectNode whichever representation we were given.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // Rewind: the buffer may already have been read for serialization.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }

        // Time the extraction so it can be reported in request diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal =  PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }

    request.setPartitionKeyInternal(partitionKeyInternal);
    // Header value must be ASCII-safe JSON.
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the {@link RxDocumentServiceRequest} used for document Create/Upsert operations:
 * serializes the payload (recording serialization diagnostics), applies request options,
 * and resolves the target collection so partition-key information can be attached.
 * NOTE: the returned Mono must be subscribed for the request to be fully populated.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Time the payload serialization so it can be surfaced in the request diagnostics below.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Opt-in retry semantics for non-idempotent writes are only honored on write operations.
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Give the retry policy a chance to observe/decorate the request before it is sent.
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// The collection must be resolved (possibly from cache) before partition-key info can be attached.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the {@link RxDocumentServiceRequest} for a transactional batch, wrapping the
 * pre-serialized batch body and resolving the target collection so batch headers can be
 * attached. NOTE: the returned Mono must be subscribed for the request to be fully populated.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent; may be null.
 * @param documentCollectionLink self-link of the target collection; must be non-empty.
 * @param serverBatchRequest batch whose request body is already serialized; must not be null.
 * @param options request options; may be null.
 * @param disableAutomaticIdGeneration unused for batch requests (ids are fixed per operation).
 * @return a Mono emitting the populated request on subscription.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // Time the UTF-8 encoding of the pre-built batch body for serialization diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    // Apply per-request region exclusions exactly once (the original code set them twice).
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        // Partition-key and batch headers depend on the resolved collection definition.
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Attaches batch-specific headers (and partition-key routing information) to the request,
 * based on the concrete batch type: single-partition-key batches get a PK header, while
 * partition-key-range batches get a PK-range identity.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// PartitionKey.NONE is translated using the collection's PK definition.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * Populates date, authorization, content-type/accept, capability, and (if needed)
 * feed-range filtering headers on the request.
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
// x-ms-date must be set before computing the authorization signature (it is part of the signed payload).
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The token is sent URL-encoded per the Cosmos REST protocol.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Default content-type: JSON for POST/PUT bodies, JSON-patch for PATCH; never overwrite caller-set values.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (this.requiresFeedRangeFiltering(request)) {
// Feed-range headers need the resolved collection and PK-range cache; AAD auth is applied afterwards.
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
/**
 * Advertises the SDK's supported capabilities on the request, unless a value was
 * already set upstream (existing headers are never overwritten).
 */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
/**
 * Returns true when the request must carry feed-range filtering headers: only
 * document/conflict feed-reads and queries that actually specify a feed range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean filterableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!filterableResource) {
        return false;
    }

    OperationType operationType = request.getOperationType();
    boolean feedOrQuery =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;

    return feedOrQuery && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    // Only AAD tokens are resolved asynchronously here; every other auth scheme has
    // already written its Authorization header before this point.
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    // Mirror of the request-based overload for raw HTTP headers: only AAD tokens
    // require asynchronous resolution; otherwise the headers pass through untouched.
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
// Exposes the auth scheme this client instance was configured with (master key, resource token, AAD, ...).
return this.authorizationTokenType;
}
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
// Auth source precedence: custom token resolver > key credential > plain resource token > resource-tokens map.
if (this.cosmosAuthorizationTokenResolver != null) {
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is used verbatim as the authorization value.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Account-level reads fall back to the first token obtained from the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps the internal service resource type onto the public {@link CosmosResourceType};
 * unrecognized values fall back to {@code SYSTEM}.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token from a service response so subsequent session-consistent
// reads against the same partition can supply it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// Sends a POST create through the store proxy after lazily populating headers.
// A non-zero retry count means this attempt is a retry, so the retry context end time is stamped.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
DocumentClientRetryPolicy documentClientRetryPolicy,
OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
// Sends a POST with the is-upsert header, then captures the session token from the
// response (unlike create/replace/patch, which rely on other capture paths).
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
DocumentClientRetryPolicy documentClientRetryPolicy,
OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
Map<String, String> headers = requestPopulated.getHeaders();
assert (headers != null);
headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
.map(response -> {
this.captureSessionToken(requestPopulated, response);
return response;
}
);
});
}
// Sends a PUT replace through the store proxy after lazily populating headers.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
return populateHeadersAsync(request, RequestVerb.PUT)
.flatMap(requestPopulated -> {
// Stamp the retry context's end time when this attempt is a retry.
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated);
});
}
// Sends a PATCH through the store proxy after lazily populating headers.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
return populateHeadersAsync(request, RequestVerb.PATCH)
.flatMap(requestPopulated -> {
// Stamp the retry context's end time when this attempt is a retry.
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return getStoreProxy(requestPopulated).processMessage(requestPopulated);
});
}
// Public entry point for document creation; wraps the core implementation with the
// availability strategy (e.g. cross-region hedging for reads/eligible writes).
@Override
public Mono<ResourceResponse<Document>> createDocument(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Create,
(opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
collectionLink,
document,
opt,
disableAutomaticIdGeneration,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the retry policy chain for a create and delegates to the internal implementation.
// When no partition key was supplied, a PartitionKeyMismatchRetryPolicy is layered on so a
// stale collection cache (e.g. after collection recreation) triggers re-resolution.
private Mono<ResourceResponse<Document>> createDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
// Effectively-final copy required for capture by the lambda below.
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() ->
createDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
// Builds the create request, applies the end-to-end timeout policy around the service call,
// and maps the raw service response to a typed ResourceResponse. Synchronous failures
// (e.g. serialization errors) are converted into an error Mono rather than thrown.
private Mono<ResourceResponse<Document>> createDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy requestRetryPolicy,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Wraps the given response Mono with the end-to-end operation timeout when one is enabled:
// a negative timeout fails immediately, otherwise a reactor timeout is applied and
// TimeoutExceptions are mapped to OperationCancelledException with request diagnostics.
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
RxDocumentServiceRequest request,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
Mono<T> rxDocumentServiceResponseMono) {
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
}
// Made visible on the request context so downstream layers can observe the policy.
request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
return rxDocumentServiceResponseMono
.timeout(endToEndTimeout)
.onErrorMap(throwable -> getCancellationException(request, throwable));
}
return rxDocumentServiceResponseMono;
}
/**
 * Translates a reactor timeout into an {@link OperationCancelledException} carrying the
 * request's diagnostics; any other throwable (or a request without a context) passes
 * through unchanged.
 */
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
    Throwable unwrapped = reactor.core.Exceptions.unwrap(throwable);
    if (!(unwrapped instanceof TimeoutException)) {
        return throwable;
    }

    CosmosException cancellation = new OperationCancelledException();
    // Preserve the original stack so the timeout's origin remains visible.
    cancellation.setStackTrace(throwable.getStackTrace());

    if (request.requestContext == null) {
        return throwable;
    }
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
/**
 * Builds the {@link OperationCancelledException} reported when a caller supplies a
 * negative end-to-end timeout; attaches the request diagnostics when available.
 */
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");

    CosmosException exception = new OperationCancelledException(
        String.format("Negative timeout '%s' provided.", negativeTimeout), null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);

    if (request == null || request.requestContext == null) {
        return exception;
    }
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
// Public entry point for document upsert; wraps the core implementation with the
// availability strategy, mirroring createDocument.
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Upsert,
(opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the retry policy chain for an upsert (adding partition-key-mismatch retries
// when no PK was supplied) and delegates to the internal implementation.
private Mono<ResourceResponse<Document>> upsertDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
// Effectively-final copy required for capture by the lambda below.
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> upsertDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
finalRetryPolicyInstance);
}
// Builds the upsert request (shares the create-request builder with OperationType.Upsert),
// applies the end-to-end timeout policy, and maps the response to a typed ResourceResponse.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point for replace-by-link; wraps the core implementation with the
// availability strategy, mirroring createDocument/upsertDocument.
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Replace,
(opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
documentLink,
document,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the retry policy chain for a replace-by-link. When no partition key was
// supplied, the collection name is derived from the document link so the
// PartitionKeyMismatchRetryPolicy can refresh the right cache entry.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
String documentLink,
Object document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
// Effectively-final copy required for capture by the lambda below.
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
documentLink,
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
/**
 * Replaces a document identified by its link, first converting the raw payload into a
 * {@link Document} and then delegating to the typed overload. Synchronous failures are
 * surfaced as an error Mono.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);

    } catch (Exception e) {
        // Pass the exception as the final argument so the stack trace is logged,
        // consistent with the create/upsert failure paths.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Public entry point for replacing a Document instance (link taken from the document
// itself); wraps the core implementation with the availability strategy.
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Replace,
(opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
document,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the retry policy chain for replacing a Document instance and delegates
// to the internal implementation.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
Document document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
// NOTE(review): unlike the link-based overload (which applies Utils.getCollectionName),
// this passes the document's self-link directly as the "collection link" — confirm
// PartitionKeyMismatchRetryPolicy tolerates a document self-link here.
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
// Effectively-final copy required for capture by the lambda below.
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
/**
 * Replaces a {@link Document} using its own self-link; delegates to the link-based
 * typed overload. Synchronous failures are surfaced as an error Mono.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);

    } catch (Exception e) {
        // Fixed copy/paste in the message (this path replaces a document, not a database)
        // and pass the exception so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Core replace implementation: serializes the document (recording diagnostics), builds the
// PUT request, resolves the collection to attach partition-key info, and applies the
// end-to-end timeout policy around the service call.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
Instant serializationStartTimeUTC = Instant.now();
if (options != null) {
// The tracking id is embedded in the payload before serialization so the service echoes it back.
String trackingId = options.getTrackingId();
if (trackingId != null && !trackingId.isEmpty()) {
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
return requestObs.flatMap(req -> {
// NOTE(review): the lambda uses the outer `request` rather than `req`;
// addPartitionKeyInformation returns the same instance, but `req` would be clearer — confirm.
Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
});
}
/**
 * Resolves the effective end-to-end latency policy: a request-level config takes
 * precedence; otherwise the client-wide default applies.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    if (options != null && options.getCosmosEndToEndLatencyPolicyConfig() != null) {
        return options.getCosmosEndToEndLatencyPolicyConfig();
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
// Public entry point for partial-document update (patch); wraps the core implementation
// with the availability strategy.
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Patch,
(opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
documentLink,
cosmosPatchOperations,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the session-token retry policy for a patch (no partition-key-mismatch layer:
// patch targets an existing document by link) and delegates to the internal implementation.
private Mono<ResourceResponse<Document>> patchDocumentCore(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
return ObservableHelper.inlineIfPossibleAsObs(
() -> patchDocumentInternal(
documentLink,
cosmosPatchOperations,
options,
documentClientRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
documentClientRetryPolicy);
}
// Core patch implementation: serializes the patch operations (recording diagnostics),
// builds the PATCH request, resolves the collection for partition-key routing, and
// applies the end-to-end timeout policy around the service call.
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// Patch has no document payload to derive the PK from; only options (or the
// collection definition) supply partition-key information.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes the document identified by the given link, routing the point operation
 * through the availability-strategy wrapper so hedging/retry policies apply.
 *
 * @param documentLink link of the document to delete.
 * @param options request options; may be {@code null}.
 * @return a {@link Mono} emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Non-idempotent write retries are only enabled when explicitly opted in.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, ctxOverride) ->
            deleteDocumentCore(documentLink, null, effectiveOptions, e2eConfig, ctxOverride),
        options,
        nonIdempotentRetriesEnabled);
}
/**
 * Deletes a document, additionally carrying the caller-supplied item snapshot so the
 * partition key can be derived from it when resolving the request.
 *
 * @param documentLink link of the document to delete.
 * @param internalObjectNode item snapshot used for partition-key resolution.
 * @param options request options; may be {@code null}.
 * @return a {@link Mono} emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // Non-idempotent write retries are only enabled when explicitly opted in.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, ctxOverride) ->
            deleteDocumentCore(documentLink, internalObjectNode, effectiveOptions, e2eConfig, ctxOverride),
        options,
        nonIdempotentRetriesEnabled);
}
/**
 * Core delete path: builds the session-token-reset retry policy for the effective
 * client context and executes the internal delete under it.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
// Builds and issues the Delete request for a single document. Kept as-is: the
// order of request mutation (retry hook, exclude-regions, collection resolve,
// partition-key enrichment) is significant for diagnostics and routing.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
// Opt-in flag: allow retrying this write even though it is not idempotent.
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy capture per-request state before dispatch.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
// internalObjectNode (when provided) supplies the partition key value.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request, null, internalObjectNode, options, collectionObs);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(req -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
// Apply the end-to-end latency policy (timeout) around the actual call.
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Deletes every document in the given logical partition of the collection.
 *
 * @param collectionLink link of the target collection.
 * @param partitionKey logical partition to purge.
 * @param options request options; may be {@code null}.
 * @return a {@link Mono} emitting the service response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
// Issues a PartitionKey-scoped Delete request that removes all items in one
// logical partition. The partition key itself travels via options/headers.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
// Note the resource type: PartitionKey, not Document.
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
// Synchronous failures are surfaced through the reactive error channel.
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Reads a single document by link using this client itself as the diagnostics factory.
 *
 * @param documentLink link of the document to read.
 * @param options request options; may be {@code null}.
 * @return a {@link Mono} emitting the read response.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return this.readDocument(documentLink, options, this);
}
/**
 * Read path shared by the public overload and readMany point-reads; the caller
 * chooses which diagnostics factory the operation is attributed to.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    // Reads never need non-idempotent write retries, hence the hard-coded false.
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (effectiveOptions, e2eConfig, ctxOverride) ->
            readDocumentCore(documentLink, effectiveOptions, e2eConfig, ctxOverride),
        options,
        false,
        innerDiagnosticsFactory);
}
/**
 * Core read path: obtains the session-token-reset retry policy for the effective
 * client context and runs the internal read under it.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a single document.
 *
 * @param documentLink link of the document to read; must be non-empty.
 * @param options request options; may be {@code null}.
 * @param retryPolicyInstance retry policy notified before dispatch; may be {@code null}.
 * @param endToEndPolicyConfig end-to-end latency policy applied around the call.
 * @param clientContextOverride diagnostics context override for this operation.
 * @return a {@link Mono} emitting the read response; synchronous failures are
 *         surfaced via {@link Mono#error}.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // BUGFIX: guard against null options before dereferencing, matching the
        // sibling write paths (deleteDocumentInternal etc.). Previously this line
        // threw NullPointerException whenever callers passed options == null.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        // Let the retry policy capture per-request state before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            // Apply the end-to-end latency policy (timeout) around the actual call.
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents of a collection by issuing an unfiltered query
 * ({@code SELECT * FROM r}) over it.
 *
 * @param collectionLink link of the collection; must be non-empty.
 * @param state feed operation state carrying query options and diagnostics.
 * @param classOfT item deserialization target type.
 * @return a {@link Flux} of feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    boolean linkMissing = StringUtils.isEmpty(collectionLink);
    if (linkMissing) {
        throw new IllegalArgumentException("collectionLink");
    }
    return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
// Reads many items by (id, partition key) identity in one logical operation.
// Strategy: resolve the collection and its routing map, bucket the identities by
// owning partition key range, serve single-item buckets as point reads and
// multi-item buckets as per-range queries, then merge everything into a single
// synthetic FeedResponse with aggregated diagnostics. Left byte-identical:
// the diagnostics merging and error mapping are order-sensitive.
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
// Scoped factory so diagnostics from all sub-operations merge into one context.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
// Bucket each identity under the PK range owning its effective partition key.
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
itemIdentityList
.forEach(itemIdentity -> {
// Hierarchical (multi-hash) PKs must supply all path components.
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Multi-item ranges become SQL queries; single-item ranges become point reads.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
// Merge all pages, then fold results + diagnostics into one FeedResponse.
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
// Record the whole readMany as a single 200 operation with summed RU charge.
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
// On failure, still record the operation with the exception's status/charge.
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
/**
 * Builds a per-partition-key-range SQL query for every range that holds more than
 * one item identity. Single-item ranges are intentionally skipped — those are
 * served via point reads instead.
 *
 * @param partitionRangeItemKeyMap identities bucketed by owning PK range.
 * @param partitionKeyDefinition the collection's partition key definition.
 * @return map from range to the query spec covering its batched identities.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        if (identities.size() > 1) {
            final SqlQuerySpec spec;
            if ("[\"id\"]".equals(partitionKeySelector)) {
                // PK path is the id itself: matching on id alone suffices.
                spec = createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector);
            } else if (PartitionKind.MULTI_HASH.equals(partitionKeyDefinition.getKind())) {
                spec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
            } else {
                spec = createReadManyQuerySpec(identities, partitionKeySelector);
            }
            rangeQueryMap.put(range, spec);
        }
    });
    return rangeQueryMap;
}
/**
 * Builds a parameterized {@code c.id IN (...)} query for the case where the
 * partition key path is the id itself, so matching on id alone is sufficient.
 * {@code partitionKeySelector} is accepted for signature parity with the sibling
 * builders and is not referenced in the generated SQL.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> sqlParameters = new ArrayList<>();
    StringBuilder sql = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    int count = idPartitionKeyPairList.size();
    for (int index = 0; index < count; index++) {
        String paramName = "@param" + index;
        sqlParameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(index).getId()));
        sql.append(paramName);
        if (index != count - 1) {
            sql.append(", ");
        }
    }
    sql.append(" )");
    return new SqlQuerySpec(sql.toString(), sqlParameters);
}
/**
 * Builds a parameterized OR-of-conjunctions query matching each (id, partition key)
 * pair: {@code (c.id = @p AND c[pk] = @q) OR ...}. Parameter names are assigned
 * pairwise: {@code @param(2i)} for the PK value, {@code @param(2i+1)} for the id.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> sqlParameters = new ArrayList<>();
    StringBuilder sql = new StringBuilder("SELECT * FROM c WHERE ( ");
    int total = itemIdentities.size();
    for (int index = 0; index < total; index++) {
        CosmosItemIdentity identity = itemIdentities.get(index);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        String pkParamName = "@param" + (2 * index);
        String idParamName = "@param" + (2 * index + 1);
        // PK parameter is registered before the id parameter, matching the naming scheme.
        sqlParameters.add(new SqlParameter(pkParamName, pkValue));
        sqlParameters.add(new SqlParameter(idParamName, identity.getId()));
        sql.append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
        if (index != total - 1) {
            sql.append(" OR ");
        }
    }
    sql.append(" )");
    return new SqlQuerySpec(sql.toString(), sqlParameters);
}
// Builds the readMany query for hierarchical (multi-hash) partition keys: each
// item contributes a conjunction of id equality plus one equality per PK path
// component. Kept byte-identical — the paramCount bookkeeping across the nested
// loops is easy to break in a rewrite.
private SqlQuerySpec createReadManyQuerySpecMultiHash(
List<CosmosItemIdentity> itemIdentities,
PartitionKeyDefinition partitionKeyDefinition) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
int paramCount = 0;
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
// NOTE(review): assumes the PK object renders as a '='-separated string with
// one segment per partition key path, in path order — confirm against the
// PartitionKey implementation used here.
String pkValueString = (String) pkValue;
List<List<String>> partitionKeyParams = new ArrayList<>();
List<String> paths = partitionKeyDefinition.getPaths();
int pathCount = 0;
for (String subPartitionKey: pkValueString.split("=")) {
String pkParamName = "@param" + paramCount;
// Pair each PK path with its parameter name for SQL generation below.
partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
parameters.add(new SqlParameter(pkParamName, subPartitionKey));
paramCount++;
pathCount++;
}
String idValue = itemIdentity.getId();
String idParamName = "@param" + paramCount;
paramCount++;
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
for (List<String> pkParam: partitionKeyParams) {
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c.");
// substring(1) drops the leading '/' of the PK path.
queryStringBuilder.append(pkParam.get(0).substring(1));
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParam.get(1));
}
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
// Builds a bracketed property selector (e.g. ["pk"] or ["a"]["b"]) from the
// partition key definition's paths, used to reference the PK in generated SQL.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
// Drop the leading '/' of each path segment.
.map(pathPart -> StringUtils.substring(pathPart, 1))
// NOTE(review): this replaces '"' with a lone backslash rather than an escaped
// quote (\") — looks suspicious for property names containing quotes; confirm
// whether "\\\"" was intended before changing.
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
// Executes the per-partition-key-range queries produced for a readMany call.
// Returns an empty Flux when no range needed a query (all buckets were single
// items served by point reads).
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
if (rangeQueryMap.isEmpty()) {
return Flux.empty();
}
// Single activity id correlates all per-range executions of this readMany.
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
diagnosticsFactory,
queryClient,
collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum,
isQueryCancelledOnTimeout);
return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
// Serves the single-item buckets of a readMany call as point reads and adapts
// each result (or a benign 404) into a one-item FeedResponse so it can be merged
// with the query results. Kept byte-identical: the response/exception pairing
// and diagnostics plumbing are delicate.
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
String resourceLink,
CosmosQueryRequestOptions queryRequestOptions,
Class<T> klass) {
return Flux.fromIterable(singleItemPartitionRequestMap.values())
.flatMap(cosmosItemIdentityList -> {
// Only buckets with exactly one identity are point reads; others are queried.
if (cosmosItemIdentityList.size() == 1) {
CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
RequestOptions requestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(queryRequestOptions);
requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
.flatMap(resourceResponse -> Mono.just(
new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
))
.onErrorResume(throwable -> {
Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
if (unwrappedThrowable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) unwrappedThrowable;
int statusCode = cosmosException.getStatusCode();
int subStatusCode = cosmosException.getSubStatusCode();
// A plain 404 means "item absent" — readMany treats that as an
// empty result rather than a failure.
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
}
}
return Mono.error(unwrappedThrowable);
});
}
return Mono.empty();
})
.flatMap(resourceResponseToExceptionPair -> {
ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
FeedResponse<Document> feedResponse;
if (cosmosException != null) {
// 404 path: empty page carrying the exception's headers/diagnostics.
feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
} else {
CosmosItemResponse<T> cosmosItemResponse =
ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
feedResponse = ModelBridgeInternal.createFeedResponse(
Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
cosmosItemResponse.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
}
return Mono.just(feedResponse);
});
}
/**
 * Queries documents using raw query text; wraps the text in a
 * {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, spec, state, classOfT);
}
// Adapts this client into the IDocumentQueryClient interface consumed by the
// query pipeline, optionally wiring an operation listener around executions.
// Kept byte-identical: the anonymous class captures the enclosing instance and
// the listener tuple, and its per-method semantics are contract-bound.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
// Without a listener, execute directly; with one, notify it on request,
// response and error while stamping the correlated activity id header.
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
@Override
// NOTE(review): read-feed is not supported through this adapter and returns
// null — callers must not rely on this path; confirm this is intentional.
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
/**
 * Queries documents with a parameterized {@link SqlQuerySpec}; the query text is
 * traced (subject to the logger's rules) before the pipeline is created.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    Flux<FeedResponse<T>> queryFlux =
        createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
    return queryFlux;
}
/**
 * Queries the change feed of the given collection, addressing it by both its
 * alt-link and resource id.
 *
 * @param collection resolved target collection; must not be {@code null}.
 * @param changeFeedOptions change feed request options.
 * @param classOfT item deserialization target type.
 * @return a {@link Flux} of change feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    return new ChangeFeedQueryImpl<T>(
            this,
            ResourceType.Document,
            classOfT,
            collection.getAltLink(),
            collection.getResourceId(),
            changeFeedOptions)
        .executeAsync();
}
/**
 * Paged-flux entry point for change feed: unwraps the options held by the
 * operation state and delegates to the options-based overload.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    CosmosChangeFeedRequestOptions effectiveOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, effectiveOptions, classOfT);
}
// Reads all documents of a single logical partition by resolving the PK range
// owning the partition key and running a partition-scoped scan query against it,
// optionally under a cross-region speculation (availability strategy) setup.
// Kept byte-identical: diagnostics-factory lifecycle and retry wiring are
// order-sensitive.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the options so speculation-related mutation never leaks to the caller.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
DiagnosticsClientContext effectiveClientContext;
ScopedDiagnosticsFactory diagnosticsFactory;
// Fewer than two applicable regions means no speculation: use this client
// directly; otherwise scope diagnostics so speculative attempts can merge.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
effectiveClientContext = this;
diagnosticsFactory = null;
} else {
diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
effectiveClientContext = diagnosticsFactory;
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
effectiveClientContext,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Scan query filtered to the logical partition identified by pkSelector.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries on stale partition info (splits/merges) by refreshing the collection cache.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
// Pin the query to the single PK range owning this partition key.
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
effectiveClientContext,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Speculation path: fold scoped diagnostics back on every outcome
// (next page, error, cancellation).
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
// Exposes the shared query-plan cache (query → partitioned execution info) used
// to skip repeated gateway query-plan round trips.
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
return queryPlanCache;
}
/**
 * Reads the partition key range feed of a collection using a feed-operation state.
 *
 * @param collectionLink link of the collection; must be non-empty.
 * @param state feed operation state carrying options and diagnostics.
 * @return a {@link Flux} of partition key range pages.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String pkRangesPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, pkRangesPath);
}
/**
 * Reads the partition key range feed of a collection using plain query options.
 *
 * @param collectionLink link of the collection; must be non-empty.
 * @param options query request options.
 * @return a {@link Flux} of partition key range pages.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String pkRangesPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, pkRangesPath);
}
/**
 * Validates the inputs and builds a stored-procedure service request under the
 * given collection for the requested operation type.
 *
 * @throws IllegalArgumentException when the collection link is empty or the
 *         stored procedure is {@code null}/invalid.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);

    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    // Validates inputs and assembles a service request targeting the collection's
    // user-defined-functions feed for the given operation type.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    // Runs the internal create under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Builds and issues the Create request; synchronous failures are surfaced
    // as an error Mono rather than thrown to the caller.
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    // Runs the internal replace under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Replaces a stored procedure addressed by its self-link.
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Runs the internal delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Deletes a stored procedure addressed by its link.
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    // Runs the internal read under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Reads a stored procedure addressed by its link.
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    // Enumerates the stored procedures of a collection as a non-document feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, resourcePath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Convenience overload: wrap raw query text in a SqlQuerySpec.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
@Override
// Queries stored procedures within the collection; delegates to the generic
// createQuery pipeline typed for StoredProcedure resources.
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    // Executes the sproc under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    // Executes the batch under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Executes a stored procedure (server-side JavaScript) and converts the raw
// service response into a StoredProcedureResponse. Synchronous failures are
// returned as an error Mono.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
// Sproc execution returns a JSON payload; request it explicitly.
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// The request body is the serialized parameter array, or empty when no
// parameters were supplied.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
// Partition-key resolution is asynchronous, hence the flatMap.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores its `req` parameter and reuses the outer
// `request` — presumably addPartitionKeyInformation mutates and completes
// with the same instance; confirm before refactoring.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
// Capture the session token before converting, so session consistency
// is maintained for subsequent requests.
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    // Builds the batch document request and parses the service response into a
    // CosmosBatchResponse; synchronous failures become an error Mono.
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        Mono<RxDocumentServiceRequest> requestMono =
            getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        return requestMono
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    // Runs the internal create under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Builds and issues the Create request for a trigger; synchronous failures
    // are surfaced as an error Mono.
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    // Validates inputs and assembles a service request targeting the collection's
    // triggers feed for the given operation type.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, resourcePath, trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Runs the internal replace under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Replaces a trigger addressed by its self-link.
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Runs the internal delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Deletes a trigger addressed by its link.
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Runs the internal read under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Reads a trigger addressed by its link.
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    // Enumerates the triggers of a collection as a non-document feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, resourcePath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Convenience overload: wrap raw query text in a SqlQuerySpec.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, state);
}
@Override
// Queries triggers within the collection; delegates to the generic createQuery
// pipeline typed for Trigger resources.
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    // Runs the internal create under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Builds and issues the Create request for a UDF; synchronous failures are
    // surfaced as an error Mono.
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    // Runs the internal replace under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Replaces a UDF addressed by its self-link.
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    // Runs the internal delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Deletes a UDF addressed by its link.
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String resourcePath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    // Runs the internal read under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Reads a UDF addressed by its link.
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String resourcePath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    // Enumerates the UDFs of a collection as a non-document feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, resourcePath);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    // Convenience overload: wrap raw query text in a SqlQuerySpec.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, querySpec, state);
}
@Override
// Queries UDFs within the collection; delegates to the generic createQuery
// pipeline typed for UserDefinedFunction resources.
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Runs the internal read under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Reads a conflict resource addressed by its link. Conflicts are partitioned,
// so partition-key information is resolved asynchronously before the read.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores its `req` parameter and reuses the outer
// `request` — presumably the same instance mutated in place; confirm before
// refactoring. onBeforeSendRequest intentionally runs only after partition-key
// resolution completes.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    // Enumerates the conflicts of a collection as a non-document feed.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String resourcePath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, resourcePath);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Convenience overload: wrap raw query text in a SqlQuerySpec.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, querySpec, state);
}
@Override
// Queries conflicts within the collection; delegates to the generic createQuery
// pipeline typed for Conflict resources.
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Runs the internal delete under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a conflict resource addressed by its link. Conflicts are partitioned,
// so partition-key information is resolved asynchronously before the delete.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda ignores its `req` parameter and reuses the outer
// `request` — presumably the same instance mutated in place; confirm before
// refactoring. onBeforeSendRequest intentionally runs only after partition-key
// resolution completes.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Runs the internal create under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Creates a user resource under the given database.
// NOTE(review): unlike upsertUserInternal, this method never calls
// documentClientRetryPolicy.onBeforeSendRequest(request) — confirm whether
// that asymmetry is intentional.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Runs the internal upsert under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Upserts a user resource under the given database; synchronous failures
    // are surfaced as an error Mono.
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest serviceRequest = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    // Validates inputs and assembles a service request targeting the database's
    // users feed for the given operation type.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Runs the internal replace under a fresh session-token-reset retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Replaces a user resource addressed by its self-link.
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Consistency fix: every sibling public CRUD entry point here (readUser,
// replaceUser, upsertUser, createUser) carries @Override; deleteUser appears
// to be declared on the client interface as well — annotation added so the
// compiler verifies the override. Confirm against the interface declaration.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    // Runs the internal delete under a fresh session-token-reset retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Single attempt at deleting a {@link User} by link; retries are driven by the caller.
 *
 * @param userLink            link of the user resource; must be non-empty.
 * @param options             request options, may be null.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the delete response, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at reading a {@link User} by link; retries are driven by the caller.
 *
 * @param userLink            link of the user resource; must be non-empty.
 * @param options             request options, may be null.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the user, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all users of a database as a paged feed.
 *
 * @param databaseLink link of the parent database; must be non-empty.
 * @param state        per-operation query state carrying feed options.
 * @return Flux of user feed pages.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    // Wrap the raw query text in a spec and delegate to the spec-based overload.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, querySpec, state);
}
/**
 * Queries users of a database with a parameterized query spec.
 *
 * @param databaseLink link of the parent database.
 * @param querySpec    query text plus parameters.
 * @param state        per-operation query state carrying feed options.
 * @return Flux of user feed pages.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at reading a {@link ClientEncryptionKey} by link; retries are driven by the caller.
 *
 * @param clientEncryptionKeyLink link of the key resource; must be non-empty.
 * @param options                 request options, may be null.
 * @param retryPolicyInstance     notified before the request is sent, may be null.
 * @return Mono emitting the key, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at creating a {@link ClientEncryptionKey}; argument validation is done by
 * {@code getClientEncryptionKeyRequest}, retries are driven by the caller.
 *
 * @param databaseLink              link of the parent database.
 * @param clientEncryptionKey       key to create (NOTE(review): a null key NPEs on getId() here,
 *                                  which is caught below and surfaced via Mono.error).
 * @param options                   request options, may be null.
 * @param documentClientRetryPolicy retry policy forwarded to the transport layer.
 * @return Mono emitting the created key, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates arguments and builds the service request for a client-encryption-key operation.
 *
 * @param databaseLink        link of the parent database; must be non-empty.
 * @param clientEncryptionKey key payload; must not be null.
 * @param options             request options, may be null.
 * @param operationType       Create/Replace/etc. used for both headers and the request itself.
 * @return the populated service request.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at replacing a {@link ClientEncryptionKey}; retries are driven by the caller.
 *
 * @param clientEncryptionKey replacement payload; must not be null.
 * @param nameBasedLink       name-based link addressing the existing key.
 * @param options             request options, may be null.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the replaced key, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all client encryption keys of a database as a paged feed.
 *
 * @param databaseLink link of the parent database; must be non-empty.
 * @param state        per-operation query state carrying feed options.
 * @return Flux of key feed pages.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
        Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
/**
 * Queries client encryption keys of a database with a parameterized query spec.
 *
 * @param databaseLink link of the parent database.
 * @param querySpec    query text plus parameters.
 * @param state        per-operation query state carrying feed options.
 * @return Flux of key feed pages.
 */
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a {@link Permission} under a user.
 *
 * @param userLink   link of the owning user.
 * @param permission permission to create.
 * @param options    request options, may be null.
 * @return Mono emitting the created permission.
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // BUGFIX: the original called getRequestPolicy(null) a second time in the
    // ObservableHelper argument, handing the retry driver a DIFFERENT policy
    // instance than the one used to build/send the request — splitting per-request
    // retry state across two objects. Every sibling operation (upsertPermission,
    // replacePermission, ...) shares one instance; do the same here.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
/**
 * Single attempt at creating a {@link Permission}; argument validation is done by
 * {@code getPermissionRequest}, retries are driven by the caller.
 *
 * @param userLink                  link of the owning user.
 * @param permission                permission to create (NOTE(review): a null permission NPEs
 *                                  on getId() here, caught below and surfaced via Mono.error).
 * @param options                   request options, may be null.
 * @param documentClientRetryPolicy retry policy forwarded to the transport layer.
 * @return Mono emitting the created permission, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at upserting a {@link Permission}; retries are driven by the caller.
 *
 * @param userLink            link of the owning user.
 * @param permission          permission to upsert.
 * @param options             request options, may be null.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the upserted permission, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates arguments and builds the service request for a permission operation.
 *
 * @param userLink      link of the owning user; must be non-empty.
 * @param permission    permission payload; must not be null.
 * @param options       request options, may be null.
 * @param operationType Create/Upsert/etc. used for both headers and the request itself.
 * @return the populated service request.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at replacing a {@link Permission}; retries are driven by the caller.
 *
 * @param permission          replacement payload, addressed by its self link; must not be null.
 * @param options             request options, may be null.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the replaced permission, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at deleting a {@link Permission} by link; retries are driven by the caller.
 *
 * @param permissionLink      link of the permission resource; must be non-empty.
 * @param options             request options, may be null.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the delete response, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at reading a {@link Permission} by link; retries are driven by the caller.
 *
 * @param permissionLink      link of the permission resource; must be non-empty.
 * @param options             request options, may be null.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the permission, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all permissions of a user as a paged feed.
 *
 * @param userLink link of the owning user; must be non-empty.
 * @param state    per-operation query state carrying feed options.
 * @return Flux of permission feed pages.
 */
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
        Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       QueryFeedOperationState state) {
    // Wrap the raw query text in a spec and delegate to the spec-based overload.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryPermissions(userLink, querySpec, state);
}
/**
 * Queries permissions of a user with a parameterized query spec.
 *
 * @param userLink  link of the owning user.
 * @param querySpec query text plus parameters.
 * @param state     per-operation query state carrying feed options.
 * @return Flux of permission feed pages.
 */
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       QueryFeedOperationState state) {
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at replacing an {@link Offer} (throughput); retries are driven by the caller.
 * Offers carry no custom headers or options, hence the null header/options arguments.
 *
 * @param offer                     replacement payload, addressed by its self link; must not be null.
 * @param documentClientRetryPolicy retry policy forwarded to the transport layer.
 * @return Mono emitting the replaced offer, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at reading an {@link Offer} by link; retries are driven by the caller.
 *
 * @param offerLink           link of the offer resource; must be non-empty.
 * @param retryPolicyInstance notified before the request is sent, may be null.
 * @return Mono emitting the offer, or an error (synchronous failures are wrapped).
 */
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        // Cast disambiguates the create(...) overload for a null header map.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        // Let the retry policy observe/adjust the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads all offers of the account as a paged feed.
 *
 * @param state per-operation query state carrying feed options.
 * @return Flux of offer feed pages.
 */
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
/**
 * Convenience overload: unwraps the query options from the operation state and delegates.
 *
 * @param state        per-operation query state.
 * @param resourceType type of the non-document resource being read.
 * @param klass        item class for deserialization.
 * @param resourceLink feed link of the resource collection.
 * @return Flux of feed pages.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}
/**
 * Reads a non-document resource feed with retry support.
 *
 * @param options      query options, may be null.
 * @param resourceType type of the non-document resource being read.
 * @param klass        item class for deserialization.
 * @param resourceLink feed link of the resource collection.
 * @return Flux of feed pages.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    // Share a single retry-policy instance between the feed producer and the retry driver.
    final DocumentClientRetryPolicy feedRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, feedRetryPolicy),
        feedRetryPolicy);
}
/**
 * Paginated ReadFeed over a non-document resource: builds one service request per page
 * (carrying the continuation token) and maps each raw response into a typed feed page.
 *
 * @param options      query options; replaced by defaults when null.
 * @param resourceType type of the resource; must not be Document (asserted below).
 * @param klass        item class for deserialization.
 * @param resourceLink feed link of the resource collection.
 * @param retryPolicy  notified before each page request is sent.
 * @return Flux of typed feed pages driven by the shared Paginator.
 */
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink,
    DocumentClientRetryPolicy retryPolicy) {
    final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
    // -1 signals "no page-size limit" to the Paginator.
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    assert(resourceType != ResourceType.Document);
    // Per-page request factory: carries the continuation token and page size as headers.
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };
    // Per-page executor: sends the request and converts the raw response into a typed page.
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
        request -> readFeed(request)
            .map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(nonNullOptions, klass),
                klass));
    return Paginator
        .getPaginatedQueryResultAsObservable(
            nonNullOptions,
            createRequestFunc,
            executeFunc,
            maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    // Wrap the raw query text in a spec and delegate to the spec-based overload.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryOffers(querySpec, state);
}
/**
 * Queries offers of the account; offers are account-scoped, hence the null parent link.
 *
 * @param querySpec query text plus parameters.
 * @param state     per-operation query state carrying feed options.
 * @return Flux of offer feed pages.
 */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Share a single retry-policy instance between the callable and the retry driver.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
/**
 * Single attempt at reading the account metadata (empty path = account root);
 * retries are driven by the caller.
 *
 * @param documentClientRetryPolicy retry policy forwarded to the transport layer.
 * @return Mono emitting the database account, or an error (synchronous failures are wrapped).
 */
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // Cast disambiguates the create(...) overload for a null header map.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Returns the session-token container backing session consistency for this client. */
public Object getSession() {
    return this.sessionContainer;
}
/**
 * Replaces the session-token container; the argument must be a {@link SessionContainer}
 * (the unchecked cast below will throw ClassCastException otherwise).
 */
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
/** Returns the cache mapping collection links to collection metadata. */
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
/** Returns the cache mapping collections to their partition key ranges. */
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
/** Returns the manager tracking regional endpoints and their availability. */
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}
/**
 * Builds an address selector over this client's address resolver.
 * Note: a NEW instance is created on every call.
 */
@Override
public AddressSelector getAddressSelector() {
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
/**
 * Reads the account metadata from one specific regional endpoint (bypassing endpoint
 * selection) via the gateway. As a side effect, updates {@code useMultipleWriteLocations}
 * from the returned account's multi-write capability combined with the connection policy.
 *
 * @param endpoint the regional endpoint to target.
 * @return Flux emitting the database account (deferred so each subscription builds a fresh request).
 */
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Pin the request to the requested regional endpoint.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 * <p>
 * Routing table (evaluated in order):
 * <ul>
 *   <li>explicit {@code useGatewayMode} on the request → gateway;</li>
 *   <li>Offer, ClientEncryptionKey, scripts (except ExecuteJavaScript), PartitionKeyRange,
 *       and PartitionKey deletes → gateway;</li>
 *   <li>Create/Upsert/Delete/Replace/Read of "master" resources (Database, User,
 *       DocumentCollection, Permission — per the branches below) → gateway;</li>
 *   <li>cross-partition queries/read-feeds on collection children with neither a PK-range
 *       identity nor a partition-key header → gateway;</li>
 *   <li>everything else → the direct store model.</li>
 * </ul>
 *
 * @param request the request to route.
 * @return RxStoreModel
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    if (resourceType == ResourceType.Offer ||
        resourceType == ResourceType.ClientEncryptionKey ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        return this.gatewayProxy;
    }
    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Queries/feeds over collection children without partition targeting need
        // gateway-side fan-out; targeted ones go direct.
        if ((operationType == OperationType.Query ||
            operationType == OperationType.SqlQuery ||
            operationType == OperationType.ReadFeed) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null &&
                request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}
/**
 * Closes the client exactly once (idempotent via the {@code closed} CAS): releases the
 * endpoint manager, store client factory, HTTP client, CPU monitor registration and,
 * when enabled, the throughput control store. Subsequent calls only log a warning.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // BUGFIX: null-check the store before closing. The enabled flag and the store
        // reference are written separately in enableThroughputControlGroup, so a close()
        // racing with enablement could observe the flag set while the store is still null,
        // throwing an NPE mid-shutdown and skipping the final log line.
        if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
/** Returns the deserializer used to materialize items from service responses. */
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
/**
 * Registers a throughput control group, lazily creating the shared control store on first
 * use and wiring it into the direct or gateway transport depending on connection mode.
 * Synchronized so concurrent enablement cannot create two stores.
 *
 * @param group               the control group; must not be null.
 * @param throughputQueryMono supplier of the current provisioned throughput.
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    // First successful CAS creates the store and hooks it into the active transport.
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
/** Delegates proactive connection warm-up and cache initialization to the store model. */
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
/** Returns the account's default consistency level as reported by the gateway configuration. */
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * <p>
 * In DIRECT mode the provider is wired into both the store model and the address resolver;
 * the gateway proxy is configured in every mode (metadata requests always pass through it).
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
/** Forwards the "proactive warm-up completed" signal for the given containers to the store model. */
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
/** Forwards the "proactive warm-up started" signal for the given containers to the store model. */
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
/** Returns the credential this client was built with (master key or resource token). */
@Override
public String getMasterKeyOrResourceToken() {
    return this.masterKeyOrResourceToken;
}
/**
 * Builds the query {@code SELECT * FROM c WHERE c<selector> = @pkValue} for scanning one
 * logical partition, binding the partition key value as a parameter rather than inlining
 * it into the query text.
 *
 * @param partitionKey         the logical partition key to scan.
 * @param partitionKeySelector property-path selector (e.g. {@code ["pk"]}) appended to the root alias.
 * @return the parameterized query spec.
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    final String pkParamName = "@pkValue";
    final Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    final List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    final String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the feed ranges (one per physical partition) of a collection, retrying through
 * an {@link InvalidPartitionExceptionRetryPolicy} so a stale name cache is refreshed and
 * the lookup re-attempted.
 *
 * @param collectionLink link of the collection.
 * @return Mono emitting the list of feed ranges.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink),
        invalidPartitionExceptionRetryPolicy);
}
/**
 * Resolves the collection from cache and maps its full set of overlapping partition key
 * ranges to feed ranges; a single attempt, retried by the caller's policy.
 *
 * @param request        pre-built resolve request (also carries diagnostics context).
 * @param collectionLink link of the collection; must be non-empty.
 * @return Mono emitting the feed ranges, or an error when the collection cannot be resolved.
 */
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
    logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        // forceRefresh=true so the range list reflects the current partition layout.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
/**
 * Converts a resolved partition-key-range list into feed ranges. A null list means the
 * cached collection mapping is stale: flag the request for a name-cache refresh and throw
 * {@link InvalidPartitionException} so the retry policy re-resolves.
 *
 * @param partitionKeyRangeListValueHolder holder of the resolved ranges (value may be null).
 * @param request                          the originating request, flagged for refresh on staleness.
 * @return one feed range per partition key range.
 */
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    final List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a physical partition's key range in its EPK-based feed-range representation.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * Unlike {@link UUID#randomUUID()}, the random bits come from a
 * non-cryptographically strong pseudo random number generator
 * ({@link ThreadLocalRandom}), which is cheaper.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    return randomUuid(random.nextLong(), random.nextLong());
}
// Stamps the RFC 4122 version-4 and IETF-variant bits onto the supplied random
// bits, producing the same bit layout as java.util.UUID.randomUUID().
static UUID randomUuid(long msb, long lsb) {
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L; // version 4
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;   // IETF variant
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload that uses this client itself as the diagnostics factory for
// the (potentially hedged) point operation.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {
    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}
// Core implementation of the threshold-based availability strategy ("hedging") for
// document point operations. When at least two regions are applicable, the operation
// is started in the first region immediately and speculatively re-issued against each
// additional region after a staggered delay; the first non-transient outcome wins.
//
// Parameters:
//   resourceType  - must be ResourceType.Document (enforced below).
//   operationType - the point operation being executed.
//   callback      - performs the actual operation with given options/diagnostics factory.
//   initialRequestOptions - caller's options; may be null (replaced by defaults).
//   idempotentWriteRetriesEnabled - whether write operations may be hedged.
//   innerDiagnosticsFactory - the diagnostics factory wrapped for the hedged attempts.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
    checkNotNull(operationType, "Argument 'operationType' must not be null.");
    checkNotNull(callback, "Argument 'callback' must not be null.");

    final RequestOptions nonNullRequestOptions =
        initialRequestOptions != null ? initialRequestOptions : new RequestOptions();

    checkArgument(
        resourceType == ResourceType.Document,
        "This method can only be used for document point operations.");

    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);

    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        idempotentWriteRetriesEnabled,
        nonNullRequestOptions);

    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // Hedging not applicable (policy disabled/absent, fewer than two applicable
        // regions, non-idempotent write, ...) - run the plain operation once.
        return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
    }

    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();

    // All hedged attempts share one scoped factory so their diagnostics can later be
    // merged into a single diagnostics context.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);

    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);

            if (monoList.isEmpty()) {
                // First (primary) attempt: it may retry across all regions. Any
                // CosmosException is converted into a value so Mono.firstWithValue
                // does not discard this candidate as "failed".
                Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: pin the request to this region by excluding all
                // other applicable regions (plus the caller's own exclusions).
                clonedOptions.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        nonNullRequestOptions.getExcludeRegions(),
                        orderedApplicableRegionsForSpeculation,
                        region)
                );

                // Only non-transient CosmosExceptions become values here; transient
                // errors keep this candidate "empty" so other attempts can still win.
                Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                // Staggered start: threshold for the first hedge, plus one
                // threshold-step for each additional region.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));

                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });

    // Take the first candidate producing a value; merge diagnostics on every exit
    // path (value, error, cancellation).
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            diagnosticsFactory.merge(nonNullRequestOptions);
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);

            // firstWithValue signals NoSuchElementException when no candidate produced
            // a value; dig out the first real CosmosException from the composite cause.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());

                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);

                    if (innerException instanceof CosmosException) {
                        CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                        diagnosticsFactory.merge(nonNullRequestOptions);
                        return cosmosException;
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }

                    index++;
                }
            }

            diagnosticsFactory.merge(nonNullRequestOptions);
            return exception;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the throwable (after stripping reactive composite wrappers) is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// True when the unwrapped throwable is a CosmosException whose status/sub-status
// combination marks it as a terminal (non-transient) outcome for hedging.
private static boolean isNonTransientCosmosException(Throwable t) {
    final Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = (CosmosException) unwrapped;
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }

    return false;
}
// Builds the exclude-region list for a hedged request targeting currentRegion: the
// caller-provided exclusions plus every other applicable region, so the request
// effectively gets pinned to currentRegion.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {

    List<String> effectiveExcludedRegions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();

    for (String candidateRegion : applicableRegions) {
        if (!candidateRegion.equals(currentRegion)) {
            effectiveExcludedRegions.add(candidateRegion);
        }
    }

    return effectiveExcludedRegions;
}
// Decides whether a (statusCode, subStatusCode) pair is a terminal outcome for a
// hedged attempt: any success/redirect, client-operation timeouts, "real" 404s
// (sub-status UNKNOWN), and the well-known non-retriable client errors.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Any sub-400 status (success or redirect) is final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }

    // A 408 is only final when it is the client-side operation timeout.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT) {
        return subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;
    }

    // A 404 without a sub-status is a definitive "not found"; any other sub-status
    // (e.g. metadata staleness) stays transient.
    if (statusCode == HttpConstants.StatusCodes.NOTFOUND) {
        return subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
    }

    // Non-retriable client errors.
    return statusCode == HttpConstants.StatusCodes.BADREQUEST
        || statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
        || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
        || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
        || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;
}
// Uses the per-operation diagnostics factory when one was supplied; otherwise this client.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any
 * @param operationType - the operation type, used to decide between read and write endpoints
 * @param excludedRegions - regions to exclude when computing the applicable endpoints
 * @return the applicable endpoints ordered by preference list if any
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    } else if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }

    // Neither read-only nor write - no applicable endpoints.
    return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes any {@code null} entries from the given endpoint list in place and returns
 * the same list instance; a {@code null} input yields the shared empty list.
 *
 * @param orderedEffectiveEndpointsList the list to clean up (may be null; mutated in place).
 * @return the cleaned-up list, never null.
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }

    // removeIf does a single pass; the previous index-based remove(i) loop shifted the
    // tail of the list on every removal (O(n^2) worst case) with identical semantics.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload that extracts the excluded regions from the request options.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {
    return getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        options.getExcludeRegions());
}
// Computes the ordered list of regions eligible for hedged (speculative) execution.
// Returns the shared empty list whenever hedging is not applicable: missing/disabled
// end-to-end policy, non-document resource, writes without idempotent retries or
// without a multi-write account, or a non-threshold-based availability strategy.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    List<String> excludedRegions) {

    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        return EMPTY_REGION_LIST;
    }

    if (resourceType != ResourceType.Document) {
        return EMPTY_REGION_LIST;
    }

    // Writes are hedged only when retries are idempotent AND the account supports
    // multiple write locations.
    if (operationType.isWriteOperation()
        && (!isIdempotentWriteRetriesEnabled || !this.globalEndpointManager.canUseMultipleWriteLocations())) {
        return EMPTY_REGION_LIST;
    }

    if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
        return EMPTY_REGION_LIST;
    }

    // Normalize exclusions once for case-insensitive comparison.
    HashSet<String> normalizedExcludedRegions = new HashSet<>();
    if (excludedRegions != null) {
        for (String excludedRegion : excludedRegions) {
            normalizedExcludedRegions.add(excludedRegion.toLowerCase(Locale.ROOT));
        }
    }

    // Preserve the endpoint preference order while dropping excluded regions.
    List<String> orderedRegionsForSpeculation = new ArrayList<>();
    for (URI endpoint : getApplicableEndPoints(operationType, excludedRegions)) {
        String regionName = this.globalEndpointManager.getRegionName(endpoint, operationType);
        if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
            orderedRegionsForSpeculation.add(regionName);
        }
    }

    return orderedRegionsForSpeculation;
}
// Feed-operation counterpart of the hedging logic in
// wrapPointOperationWithAvailabilityStrategy: when two or more regions are applicable,
// the feed operation is started in the first region immediately and speculatively
// re-issued against each additional region after a staggered delay; the first
// non-transient outcome wins. Feed operations are treated as reads (no idempotent
// write retry requirement).
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
    final ResourceType resourceType,
    final OperationType operationType,
    final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
    final RxDocumentServiceRequest req,
    final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
    checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
    checkNotNull(req, "Argument 'req' must not be null.");
    assert(resourceType == ResourceType.Document);

    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = req
        .requestContext
        .getEndToEndOperationLatencyPolicyConfig();

    List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
    List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        false,
        initialExcludedRegions
    );

    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // Hedging not applicable - run the feed operation once with the original request.
        return feedOperation.apply(retryPolicyFactory, req);
    }

    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();

    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            // Each attempt operates on its own clone of the request.
            RxDocumentServiceRequest clonedRequest = req.clone();

            if (monoList.isEmpty()) {
                // First (primary) attempt: any CosmosException becomes a value so that
                // Mono.firstWithValue does not discard this candidate as "failed".
                Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: pin the request to this region by excluding all
                // other applicable regions (plus the caller's own exclusions).
                clonedRequest.requestContext.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        initialExcludedRegions,
                        orderedApplicableRegionsForSpeculation,
                        region)
                );

                // Only non-transient CosmosExceptions become values here; transient
                // errors keep this candidate "empty" so other attempts can still win.
                Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                // Staggered start: threshold for the first hedge, plus one
                // threshold-step for each additional region.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));

                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });

    // Take the first candidate producing a value and unwrap it into a response or error.
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }

            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);

            // firstWithValue signals NoSuchElementException when no candidate produced
            // a value; dig out the first real CosmosException from the composite cause.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());

                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);

                    if (innerException instanceof CosmosException) {
                        return Utils.as(innerException, CosmosException.class);
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }

                    index++;
                }
            }

            return exception;
        });
}
/**
 * Callback representing a single document point operation, executed with the supplied
 * request options, end-to-end latency policy config and diagnostics factory.
 */
@FunctionalInterface
private interface DocumentPointOperation {
    Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Terminal outcome of a hedged point operation: either a successful response or a
// non-transient CosmosException. Exactly one of the two fields is non-null.
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    // Wraps a non-transient failure.
    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.exception = exception;
        this.response = null;
    }

    // Wraps a successful response.
    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.exception = null;
        this.response = response;
    }

    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
// Terminal outcome of a hedged feed operation: either a successful response of type T
// or a non-transient CosmosException. Exactly one of the two fields is non-null.
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    // Wraps a non-transient failure.
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.exception = exception;
        this.response = null;
    }

    // Wraps a successful response.
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.exception = null;
        this.response = response;
    }

    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
/**
 * A {@link DiagnosticsClientContext} decorator that records every {@link CosmosDiagnostics}
 * instance it creates so that, once the (possibly hedged) operation finishes, all captured
 * diagnostics can be merged into a single {@link CosmosDiagnosticsContext}.
 * <p>
 * The merge is one-shot (guarded by {@code isMerged}) because multiple hedged monos may
 * race to report completion; {@link #reset()} re-arms the factory for reuse.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {

    // Ensures the captured diagnostics are merged at most once per scope.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Track every diagnostics instance handed out so it can be merged later.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }

    // Merges using the diagnostics context snapshot from the request options when available.
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;

        if (requestOptions != null) {
            CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
            if (ctxSnapshot != null) {
                // Reuse the snapshot already fetched above instead of invoking the
                // getter a second time (previously it was called twice).
                knownCtx = ctxSnapshot;
            }
        }

        merge(knownCtx);
    }

    public void merge(CosmosDiagnosticsContext knownCtx) {
        // Only the first merge wins; later calls (e.g. from racing hedged monos) are no-ops.
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }

        CosmosDiagnosticsContext ctx = null;

        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            // Fall back to the first captured diagnostics that already carries a context.
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }

        if (ctx == null) {
            // Nothing to merge into.
            return;
        }

        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            // Attach only context-less, non-empty diagnostics to the chosen context.
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as captured so the paged flux keeps them.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }

                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    // Clears captured diagnostics and re-arms the factory for the next scope.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
* Compatibility mode: Allows to specify compatibility mode used by client when
* making query requests. Should be removed when application/sql is no longer
* supported.
*/
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length == 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/**
 * Returns the diagnostics configuration captured when this client was built.
 *
 * @return the {@link DiagnosticsClientConfig} for this client instance.
 */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
@Override
public CosmosDiagnostics createDiagnostics() {
    // Creates a fresh diagnostics object for one operation, applying the sampling
    // rate configured on this client's telemetry config.
    return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
// Initializes the gateway service configuration reader and validates that a database
// account snapshot is available; fails fast with a descriptive error otherwise.
// NOTE(review): the two string literals below appear truncated at "https:" — looks like
// an extraction artifact that cut the literal at the URL's "//"; confirm against the
// original source before relying on this text.
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    // A null account here means the endpoint was never reachable or auth failed.
    if (databaseAccount == null) {
        logger.error("Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
        throw new RuntimeException("Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
    }
    // Multi-write is enabled only if both the client policy and the account allow it.
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Pushes the initialized configuration reader and metadata caches into the
// gateway proxy after client setup completes.
private void updateGatewayProxy() {
    this.gatewayProxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    this.gatewayProxy.setCollectionCache(this.collectionCache);
    this.gatewayProxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    this.gatewayProxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes this client's collection cache into the given metadata-caches
// snapshot so that a future client can warm-start from it.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
// Sets up direct (TCP/RNTBD) connectivity: the global address resolver, the
// store client factory, and finally the server store model.
private void initializeDirectConnectivity() {
    // Resolves partition addresses across regions for direct-mode requests.
    GlobalAddressResolver resolver = new GlobalAddressResolver(
        this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy,
        this.apiType);
    this.addressResolver = resolver;
    this.storeClientFactory = new StoreClientFactory(
        resolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager);
    this.createStoreModel(true);
}
// Adapts this client to the DatabaseAccountManagerInternal interface consumed by
// the GlobalEndpointManager; each call delegates back to the enclosing client.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }
        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }
        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
/**
 * Factory for the gateway store model used for gateway-mode (HTTPS) requests.
 * Overridable in tests to inject a different gateway implementation.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    RxGatewayStoreModel gatewayStoreModel = new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
    return gatewayStoreModel;
}
// Builds the reactor HTTP client from the connection policy; when cross-client
// connection sharing is enabled, a JVM-wide shared instance is used instead.
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
    if (this.connectionSharingAcrossClientsEnabled) {
        // Reference-counted shared client, keyed by configuration.
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
    }
    // Dedicated client: record its configuration for diagnostics.
    diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
    return HttpClient.createFixed(httpClientConfig);
}
// Creates the direct-mode store client and wraps it in a ServerStoreModel.
// NOTE(review): the subscribeRntbdStatus parameter is not used anywhere in this
// body — confirm whether RNTBD status subscription was intentionally removed or
// the flag should be forwarded.
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations,
        this.sessionRetryOptions);
    this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the account service endpoint this client was configured with. */
@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}
/** Returns the effective connection policy for this client. */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}
/** Whether write operations return the full resource payload by default. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/** Returns the client-level consistency level (may be null if account default). */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}
/** Returns the client telemetry component for this client. */
@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}
/** Returns the correlation id identifying this client in diagnostics. */
@Override
public String getClientCorrelationId() {
    return this.clientCorrelationId;
}
/**
 * Returns the machine id recorded in the diagnostics client config,
 * or {@code null} when no diagnostics config is present.
 */
@Override
public String getMachineId() {
    return this.diagnosticsClientConfig == null
        ? null
        : this.diagnosticsClientConfig.getMachineId();
}
/** Returns the full user-agent string (including any configured suffix). */
@Override
public String getUserAgent() {
    return this.userAgentContainer.getUserAgent();
}
/**
 * Creates a database, wrapping the call in a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and dispatches the Create-Database request; records serialization timing
// into the request's diagnostics. Errors (including validation failures) surface
// as Mono.error rather than thrown exceptions.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Time the payload serialization so it shows up in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        // Give the retry policy a chance to stamp the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a database, wrapping the call in a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and dispatches the Delete-Database request; validation failures and
// other synchronous errors surface as Mono.error.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String requestPath = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, requestPath, requestHeaders, options);
        // Allow the retry policy to stamp the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database, wrapping the call in a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and dispatches the Read-Database request; synchronous failures
// surface as Mono.error.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String requestPath = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, requestPath, requestHeaders, options);
        // Allow the retry policy to stamp the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the database feed rooted at the service's databases path. */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    return nonDocumentReadFeed(
        state,
        ResourceType.Database,
        Database.class,
        Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link and target resource type to the feed/query link
 * for that resource type (e.g. a database link plus {@code DocumentCollection}
 * yields the collections feed path).
 *
 * @param parentResourceLink link of the parent resource; ignored for root-scoped
 *                           types (Database, Offer).
 * @param resourceTypeEnum   the resource type being queried.
 * @return the query link for the given resource type.
 * @throws IllegalArgumentException if the resource type has no query feed.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
        case Offer:
            return Paths.OFFERS_ROOT;
        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
        case ClientEncryptionKey:
            return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        case Conflict:
            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
        default:
            // Include the offending type so the failure is diagnosable from logs.
            throw new IllegalArgumentException("resource type not supported: " + resourceTypeEnum);
    }
}
// Extracts the operation context/listener pair from query options; null-safe.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getOperationContext(options);
}
// Extracts the operation context/listener pair from request options; null-safe.
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload: runs the query with this client acting as the
// diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
// Core query entry point: resolves the query link, sets up correlation activity id,
// retry policy for partition-gone cases, and a scoped diagnostics factory whose
// contents are merged into the operation state on completion, error, AND cancel.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
    // Caller-supplied correlation id wins; otherwise generate one for this query.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();
    // Shared flag so timeout handling can mark the query as cancelled.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
    // Retries the query when a partition split/merge invalidates the routing map.
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);
    // Merge collected diagnostics into the state snapshot on every terminal path.
    return
        ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy
        ).flatMap(result -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Creates the query execution pipeline: builds the execution context, attaches
// query info and (on the first page only) query-plan diagnostics to responses,
// and applies an end-to-end timeout when one is configured.
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
    // Query-plan diagnostics should only be attached to the first page.
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // compareAndSet guarantees exactly one page carries the plan diagnostics.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);
        // Only wrap with a timeout when an end-to-end latency policy is enabled.
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
        }
        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Aggregates the diagnostics of all requests that were in flight when a query was
 * cancelled (e.g. by an end-to-end timeout) and attaches the merged diagnostics to
 * the given exception so callers see the complete picture.
 *
 * @param requestOptions the query options tracking cancelled-request diagnostics.
 * @param exception      the exception to decorate with the aggregated diagnostics.
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {
    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);
    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregratedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);
                    // BUGFIX: previously read statistics from 'first' a second time,
                    // so 'toBeMerged' contributions were silently dropped.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);
                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get();
        BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
    }
}
// Wraps the feed flux with the configured end-to-end timeout, converting reactor
// TimeoutExceptions into Cosmos cancellation exceptions carrying the merged
// diagnostics of all cancelled requests.
// NOTE(review): a negative timeout takes a dedicated branch producing a
// "negative timeout" exception — presumably negative values mean "cancel
// immediately"; confirm against the end-to-end latency policy contract.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                    // Preserve the original stack so the timeout site stays visible.
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
                    return cancellationException;
                }
                return throwable;
            });
    }
    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                CosmosException exception = new OperationCancelledException();
                exception.setStackTrace(throwable.getStackTrace());
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnostics(requestOptions, exception);
                return exception;
            }
            return throwable;
        });
}
/** Queries databases from a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDatabases(querySpec, state);
}
/** Queries databases at the service root. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        Paths.DATABASES_ROOT,
        querySpec,
        state,
        Database.class,
        ResourceType.Database);
}
/**
 * Creates a collection under the given database, wrapping the call in a
 * per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and dispatches the Create-Collection request, records serialization
// timing in diagnostics, and captures the session token from the response.
// Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the payload serialization so it shows up in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // FIX: guard against a null resource before capturing the session
                // token, consistent with replaceCollectionInternal; previously this
                // could NPE when the response carried no resource body.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a collection's definition, wrapping the call in a per-operation
 * retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and dispatches the Replace-Collection request, records serialization
// timing in diagnostics, and (when a resource body is returned) refreshes the
// session token. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the payload serialization so it shows up in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Resource may be absent (e.g. minimal response); only then skip token capture.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a collection, wrapping the call in a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and dispatches the Delete-Collection request; synchronous failures
// surface as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String requestPath = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, requestPath, requestHeaders, options);
        // Allow the retry policy to stamp the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Populates DELETE headers, records retry-window end time when this is a retry,
// then dispatches through the resolved store proxy.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                // Mark the end of the retry window so diagnostics capture retry latency.
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Populates POST headers for a delete-by-partition-key operation, records the
// retry-window end time when retrying, then dispatches through the store proxy.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// Populates GET headers, records the retry-window end time when retrying,
// then dispatches through the resolved store proxy.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(populatedRequest);
        });
}
// Populates GET headers and dispatches a feed-read through the store proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = getStoreProxy(populatedRequest);
            return storeProxy.processMessage(populatedRequest);
        });
}
// Dispatches a query (POST) and captures the session token from every
// successful response before handing it back to the caller.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            return storeProxy.processMessage(populatedRequest)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
/**
 * Reads a collection, wrapping the call in a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}
// Builds and dispatches the Read-Collection request; synchronous failures
// surface as Mono.error.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String requestPath = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, requestPath, requestHeaders, options);
        // Allow the retry policy to stamp the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the collections feed of the given database. */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String collectionsFeedLink = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsFeedLink);
}
/** Queries collections from a raw query string. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/** Queries collections from a parameterized query spec. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        databaseLink,
        querySpec,
        state,
        DocumentCollection.class,
        ResourceType.DocumentCollection);
}
// Serializes stored-procedure parameters into a single JSON array literal,
// e.g. ["a",1,{"k":2}]. JsonSerializable values use the bridge serializer;
// everything else goes through the shared object mapper.
private static String serializeProcedureParams(List<Object> objectArray) {
    String[] serializedParams = new String[objectArray.size()];
    int index = 0;
    for (Object param : objectArray) {
        if (param instanceof JsonSerializable) {
            serializedParams[index] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param);
        } else {
            try {
                serializedParams[index] = mapper.writeValueAsString(param);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
        ++index;
    }
    return String.format("[%s]", StringUtils.join(serializedParams, ","));
}
// Validates a resource id before sending it to the service: rejects ids containing
// path-breaking characters and ids ending with a space.
// NOTE(review): the condition on the fourth indexOf call below is truncated
// (unterminated char literal) — looks like an extraction artifact that cut the
// line at a special character; confirm the full character list against the
// original source.
private static void validateResource(Resource resource) {
    if (!StringUtils.isEmpty(resource.getId())) {
        if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
            resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
            throw new IllegalArgumentException("Id contains illegal chars.");
        }
        // Trailing spaces are significant to the service and are rejected client-side.
        if (resource.getId().endsWith(" ")) {
            throw new IllegalArgumentException("Id ends with a space.");
        }
    }
}
// Builds the per-request HTTP header map from client-level defaults plus the supplied
// RequestOptions, for the given resource type / operation type combination.
// Returns a freshly allocated, mutable map.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
// Client-wide defaults, applied whether or not options are present.
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
// Without options, only the client-level content-response preference can apply.
if (options == null) {
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
// Custom headers are copied in first so the typed options below override them.
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request override of the client-wide contentResponseOnWriteEnabled flag.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
// Optimistic-concurrency preconditions.
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if (options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
// Per-request consistency override takes precedence over the client default set above.
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
// Pre/post trigger lists are sent as comma-separated header values.
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Throughput: explicit offer throughput wins over a legacy offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties-based path (manual or autoscale), only when no explicit
// offer throughput was provided above.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
// Fixed (manual) throughput and autoscale settings are mutually exclusive.
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
// Dedicated-gateway (integrated cache) per-request options.
if (options.getDedicatedGatewayRequestOptions() != null) {
if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
}
}
return headers;
}
// Exposes the retry-policy factory used to create per-request retry policies
// (see the *Core methods below, which call getRequestPolicy on it).
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection via the collection cache, then stamps the
// partition-key header on the request and emits the same request instance.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> resolvedCollection =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    return resolvedCollection.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
// Overload taking an already-resolved collection observable; stamps the
// partition-key header once the collection is available.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        DocumentCollection resolved = holder.v;
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, resolved);
        return request;
    });
}
// Determines the effective partition key for the request and sets both the
// internal PK value and the x-ms-documentdb-partitionkey header.
// Precedence: explicit PartitionKey.NONE in options > explicit PK in options >
// empty PK for collections without a PK definition > PK extracted from the
// document body; otherwise the operation cannot proceed.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collection has no partition-key definition: use the empty key.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
// Extract the PK value from the document body itself.
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// Rewind: the buffer may already have been consumed by serialization.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// The PK extraction is timed and recorded in the request's serialization diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the service request for a document Create/Upsert: serializes the document,
// assembles headers, wires up retry/exclude-region hooks, records serialization
// diagnostics, and finally resolves the collection to stamp the partition key.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Serialization is timed so it can be surfaced in diagnostics below.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Opt-in retries for non-idempotent writes apply to write operations only.
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy capture per-request state before the request is sent.
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
// Builds the service request for a transactional-batch operation: serializes the
// batch body, assembles headers, wires retry/exclude-region hooks, records
// serialization diagnostics, then resolves the collection to stamp batch headers.
// Fix: the exclude-regions option was applied twice (two identical `if (options != null)`
// blocks); the redundant second application is removed.
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // Serialization of the batch body is timed for the diagnostics context below.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    // Let the retry policy capture per-request state before the request is sent.
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
// Stamps batch-specific routing and control headers on the request.
// Single-PK batches are routed by partition-key value; PK-range batches are routed
// by partition-key-range identity. Any other ServerBatchRequest subtype is rejected.
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// NONE maps to the collection-specific "none" key derived from its PK definition.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
// Batch control headers: atomicity and continue-on-error semantics come from the batch itself.
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
 * Populates date, authorization, content-type/accept, capabilities and (when needed)
 * feed-range filtering headers on the request.
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key-based auth schemes compute the token synchronously here; AAD tokens are
// resolved later by populateAuthorizationHeader.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The token is URL-encoded before being placed on the wire.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Content-type defaults depend on the verb; existing values are never overwritten.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range reads/queries need extra routing headers resolved asynchronously
// against the partition-key-range cache before the auth header is finalized.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
// Advertises the SDK's supported capabilities unless the header is already set.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
// True when the request is a feed/query over documents or conflicts that carries
// an explicit feed range and therefore needs feed-range routing headers.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    boolean isDocumentOrConflict =
        request.getResourceType() == ResourceType.Document
            || request.getResourceType() == ResourceType.Conflict;
    if (!isDocumentOrConflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
// Sets the AUTHORIZATION header for AAD-authenticated clients; for every other
// auth scheme the token was already applied upstream, so the request passes through.
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
// HttpHeaders variant: sets the AUTHORIZATION header for AAD clients, otherwise
// emits the headers unchanged.
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
// Returns the auth scheme this client was configured with (e.g. AadToken vs key-based).
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
return this.authorizationTokenType;
}
// Computes the authorization token for a request. Precedence:
// custom token resolver > key credential > raw resource token > resource-token map
// (which asserts that at least one auth source was configured).
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Properties are handed to user code as an unmodifiable view.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is used as-is.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Account-level reads use the first token obtained from the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
// Maps the service-serialized resource-type name onto the public enum;
// unknown values fall back to SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Records the session token from a service response into the session container,
// keyed by the request, so later session-consistent reads can observe this write.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
// Issues a POST once headers are populated, updating the retry context's end time
// on retried attempts before handing the request to the store proxy.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return this.getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
// An upsert is a POST with the IS_UPSERT header set. The response's session token
// is captured so subsequent session-consistent reads can observe this write.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(serviceResponse -> {
                    this.captureSessionToken(populatedRequest, serviceResponse);
                    return serviceResponse;
                });
        });
}
// Issues a PUT once headers are populated, stamping retry-context timing on retries.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Issues a PATCH once headers are populated, stamping retry-context timing on retries.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
// Public entry point for document Create. Routes through the availability-strategy
// wrapper; non-idempotent write retries are honored only when enabled on the options.
@Override
public Mono<ResourceResponse<Document>> createDocument(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Create,
(opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
collectionLink,
document,
opt,
disableAutomaticIdGeneration,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the per-request retry policy (wrapping it with PK-mismatch handling when
// no partition key was supplied) and runs the create through the retry helper.
private Mono<ResourceResponse<Document>> createDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
// Without an explicit PK, a stale collection cache can cause PK-mismatch errors;
// this wrapper refreshes the cache and retries in that case.
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() ->
createDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
// Builds the create request, applies the end-to-end timeout policy, sends it, and
// converts the service response. Synchronous failures are surfaced as Mono.error.
private Mono<ResourceResponse<Document>> createDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy requestRetryPolicy,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options));
// Wrap with the end-to-end timeout so cancellation carries diagnostics.
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Applies the end-to-end operation timeout (when the policy is enabled) to the
// response Mono: negative timeouts fail immediately, and a reactor timeout is
// mapped to an OperationCancelledException carrying the request diagnostics.
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
RxDocumentServiceRequest request,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
Mono<T> rxDocumentServiceResponseMono) {
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
}
// Record the active policy on the request so downstream layers can observe it.
request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
return rxDocumentServiceResponseMono
.timeout(endToEndTimeout)
.onErrorMap(throwable -> getCancellationException(request, throwable));
}
return rxDocumentServiceResponseMono;
}
// Translates a reactor TimeoutException (from the end-to-end timeout) into an
// OperationCancelledException that carries the request's diagnostics; every other
// throwable — and a timeout without a request context — passes through unchanged.
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
    Throwable unwrapped = reactor.core.Exceptions.unwrap(throwable);
    if (!(unwrapped instanceof TimeoutException)) {
        return throwable;
    }
    CosmosException cancellation = new OperationCancelledException();
    // Preserve the original failure location for debugging.
    cancellation.setStackTrace(throwable.getStackTrace());
    if (request.requestContext == null) {
        return throwable;
    }
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
// Builds the OperationCancelledException raised when a caller configures a negative
// end-to-end timeout, tagging it with the NEGATIVE_TIMEOUT_PROVIDED sub-status and,
// when available, the request's diagnostics.
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
checkArgument(
negativeTimeout.isNegative(),
"This exception should only be used for negative timeouts");
String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
CosmosException exception = new OperationCancelledException(message, null);
BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
if (request != null && request.requestContext != null) {
request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
return exception;
}
// Public entry point for document Upsert; routes through the availability-strategy
// wrapper, mirroring createDocument.
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
RequestOptions options, boolean disableAutomaticIdGeneration) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Upsert,
(opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the per-request retry policy (with PK-mismatch wrapping when no partition
// key was supplied) and runs the upsert through the retry helper.
private Mono<ResourceResponse<Document>> upsertDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> upsertDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
finalRetryPolicyInstance);
}
// Builds the upsert request, applies the end-to-end timeout, sends it, and converts
// the service response. Synchronous failures are surfaced as Mono.error.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Public entry point for document Replace by link; routes through the
// availability-strategy wrapper, mirroring createDocument/upsertDocument.
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
RequestOptions options) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Replace,
(opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
documentLink,
document,
opt,
e2ecfg,
clientCtxOverride),
options,
options != null && options.getNonIdempotentWriteRetriesEnabled()
);
}
// Sets up the per-request retry policy for a replace by document link. Without an
// explicit PK, the collection link is derived from the document link for the
// PK-mismatch retry wrapper.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
String documentLink,
Object document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
documentLink,
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
// Validates the arguments, converts the raw object into a typed Document, and
// delegates to the Document-typed replace path. Synchronous failures surface as
// Mono.error. Fix: the exception is now passed as the trailing SLF4J argument so
// the stack trace is logged, consistent with the create/upsert failure paths.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its self link, routing the call through the
 * client-side availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    // Write retries for non-idempotent operations require an explicit opt-in.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) ->
            replaceDocumentCore(document, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core replace path for a self-link addressed {@link Document}: builds the retry
 * policy (adding partition-key-mismatch handling when no partition key was
 * supplied) and defers to the internal implementation.
 *
 * @throws IllegalArgumentException if {@code document} is null.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Validate up front: the previous code dereferenced 'document' below (getSelfLink)
    // before replaceDocumentInternal's own null check could run, raising a raw NPE
    // instead of the documented IllegalArgumentException.
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // No explicit partition key: wrap with mismatch handling keyed by the self link.
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}
/**
 * Validates the document and delegates to the link-addressed internal replace,
 * using the document's self link as the target.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Fixed copy/paste defect: the previous message logged "replacing a database"
        // even though this path replaces a document.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Builds and executes the Replace request for an already-typed {@link Document}.
 * Serializes the payload (recording serialization diagnostics), resolves the
 * target collection, attaches partition key information, and runs the replace
 * under the end-to-end timeout policy.
 *
 * @param documentLink link of the document to replace; not validated here — callers validate.
 * @param document the typed replacement payload; must not be null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy for the timeout wrapper.
 * @param clientContextOverride diagnostics context override; falls back via getEffectiveClientContext.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    if (document == null) {
        throw new IllegalArgumentException("document");
    }
    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    // Serialization window is timed so it can be reported in request diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();
    if (options != null) {
        String trackingId = options.getTrackingId();
        if (trackingId != null && !trackingId.isEmpty()) {
            // Note: this mutates the caller-supplied document by stamping the tracking id.
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }
    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        // Let the retry policy observe/augment the request before the first attempt.
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    Mono<RxDocumentServiceRequest> requestObs =
        addPartitionKeyInformation(request, content, document, options, collectionObs);
    return requestObs.flatMap(req -> {
        // NOTE(review): the lambda uses the outer 'request', not 'req' — presumably the
        // same instance after partition key enrichment; confirm before refactoring.
        Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class));
        return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
    });
}
/**
 * Resolves the end-to-end latency policy for a request: a per-request override
 * from {@code options} wins; otherwise the client-level default is used.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Falls back to the client-wide end-to-end latency policy when no
 * request-level policy was provided.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies a set of patch operations to a document addressed by link, routed
 * through the client-side availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    // Non-idempotent write retries require explicit opt-in on the options.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (opt, e2ecfg, clientCtxOverride) ->
            patchDocumentCore(documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core patch path: builds the session-token-reset retry policy and defers to
 * the internal implementation. Unlike replace/delete, patch does not wrap with
 * a partition-key-mismatch policy.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(
            documentLink,
            cosmosPatchOperations,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Builds and executes the Patch request: serializes the patch operations
 * (recording serialization diagnostics), resolves the target collection,
 * attaches partition key information, and runs the patch under the
 * end-to-end timeout policy.
 *
 * @param documentLink link of the target document; must be non-empty.
 * @param cosmosPatchOperations the patch operations to apply; must not be null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy for the timeout wrapper.
 * @param clientContextOverride diagnostics context for the request.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    // Time the serialization of the patch body so it can be attached to diagnostics below.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    // NOTE(review): this passes clientContextOverride directly, whereas the
    // replace/delete/read paths call getEffectiveClientContext(...) — confirm whether
    // the fallback is intentionally skipped here.
    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (retryPolicyInstance != null) {
        // Let the retry policy observe/augment the request before the first attempt.
        retryPolicyInstance.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    // Content/document are null here: partition key comes from options, not the body.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request,
        null,
        null,
        options,
        collectionObs);
    Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
        Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
        return getRxDocumentServiceResponseMonoWithE2ETimeout(
            request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
    });
    return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document addressed by link, routed through the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // No document snapshot is available on this overload; pass null through to the core path.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) ->
            deleteDocumentCore(documentLink, null, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Deletes a document addressed by link, forwarding the caller-supplied
 * {@link InternalObjectNode} snapshot for partition key resolution.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) ->
            deleteDocumentCore(documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core delete path: builds the session-token-reset retry policy and defers to
 * the internal implementation.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Builds and executes the Delete request for a document: resolves the target
 * collection, attaches partition key information (optionally from the supplied
 * item snapshot), and runs the delete under the end-to-end timeout policy.
 *
 * @param documentLink link of the document to delete; must be non-empty.
 * @param internalObjectNode optional item snapshot used for partition key resolution; may be null.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy for the timeout wrapper.
 * @param clientContextOverride diagnostics context override; falls back via getEffectiveClientContext.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            // Let the retry policy observe/augment the request before the first attempt.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(req -> {
                Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
                    .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
                return getRxDocumentServiceResponseMonoWithE2ETimeout(
                    request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
            });
        return responseObservable
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        // Surface synchronous validation/build failures through the reactive pipeline.
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes every document under a single partition key of the given collection.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    // NOTE(review): 'partitionKey' is not forwarded to the internal path; presumably
    // the partition key is carried inside 'options' — confirm with callers.
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and executes a PartitionKey-scoped Delete request ("delete all items
 * by partition key"): resolves the collection, attaches partition key
 * information from the options, and executes the bulk delete.
 *
 * @param collectionLink link of the collection whose partition is being purged; must be non-empty.
 * @param options request options; expected to carry the partition key — see caller.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        // ResourceType.PartitionKey targets the partition-level delete operation, not a single document.
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        // Surface synchronous validation/build failures through the reactive pipeline.
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a document by link using this client itself as the diagnostics factory.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    // Delegate to the overload that accepts an alternate diagnostics factory
    // (used by readMany point reads with a scoped factory).
    return readDocument(documentLink, options, this);
}
/**
 * Read-by-link path that lets the caller supply the diagnostics factory,
 * routed through the availability strategy.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (opt, e2ecfg, clientCtxOverride) ->
            readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
        options,
        false, // reads never use non-idempotent write retries
        innerDiagnosticsFactory
    );
}
/**
 * Core read path: builds the session-token-reset retry policy and defers to
 * the internal implementation.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
/**
 * Builds and executes the Read request for a document: resolves the collection,
 * attaches partition key information, and runs the read under the end-to-end
 * timeout policy.
 *
 * @param documentLink link of the document to read; must be non-empty.
 * @param options request options; may be null.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @param endToEndPolicyConfig effective end-to-end latency policy for the timeout wrapper.
 * @param clientContextOverride diagnostics context override; falls back via getEffectiveClientContext.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // Bug fix: previously options.getExcludeRegions() was called unconditionally,
        // throwing NPE for null options. Every sibling point-operation path
        // (delete/replace/patch) guards this access with a null check.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            // Let the retry policy observe/augment the request before the first attempt.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents of a collection by issuing an unfiltered query.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // A full scan is expressed as a SELECT * query over the collection.
    final String selectAllQuery = "SELECT * FROM r";
    return queryDocuments(collectionLink, selectAllQuery, state, classOfT);
}
/**
 * Reads a batch of items identified by (id, partition key) pairs. Items are
 * grouped by partition key range; ranges holding exactly one requested item are
 * served via point reads, ranges with multiple items via a generated query
 * (see getRangeQueryMap). All sub-results are merged into a single synthetic
 * FeedResponse with aggregated request charge, query metrics and diagnostics.
 *
 * @param itemIdentityList the (id, partition key) pairs to read.
 * @param collectionLink link of the target collection.
 * @param state feed operation state used to merge/record diagnostics.
 * @param klass type the result documents are deserialized into.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {
    // Scoped factory accumulates diagnostics across all point reads/queries and is
    // merged back into the caller's operation state.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );
    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);
    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }
            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);
            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    // Bucket requested items by the partition key range that owns them.
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // MULTI_HASH keys must supply a component per defined path.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                    .getComponents().size() != pkDefinition.getPaths().size()) {
                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);
                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });
                    // Ranges with >1 item get a generated query; single-item ranges are point reads.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);
                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));
                    // Merge both sources and fold every page into one synthetic FeedResponse.
                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }
                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }
                            CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                // Record the overall operation as a 200 with the summed charge.
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }
                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                })
                .onErrorMap(throwable -> {
                    // On CosmosException, still record the failed operation into the
                    // diagnostics context before re-surfacing the same exception.
                    if (throwable instanceof CosmosException) {
                        CosmosException cosmosException = (CosmosException)throwable;
                        CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                        if (diagnostics != null) {
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    cosmosException.getStatusCode(),
                                    cosmosException.getSubStatusCode(),
                                    0,
                                    cosmosException.getRequestCharge(),
                                    diagnostics,
                                    throwable
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        diagnostics,
                                        state.getDiagnosticsContextSnapshot());
                            }
                        }
                        return cosmosException;
                    }
                    return throwable;
                });
        }
        );
}
/**
 * Builds, per partition key range, the query used to fetch that range's items.
 * Only ranges holding more than one requested item get a query — single-item
 * ranges are served by point reads instead.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        if (identities.size() <= 1) {
            return; // point-read territory; no query needed
        }
        final SqlQuerySpec sqlQuerySpec;
        if (partitionKeySelector.equals("[\"id\"]")) {
            // Partition key path is /id: filtering on c.id alone suffices.
            sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            sqlQuerySpec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            sqlQuerySpec = createReadManyQuerySpec(identities, partitionKeySelector);
        }
        rangeQueryMap.put(range, sqlQuerySpec);
    });
    return rangeQueryMap;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for the case where the
 * partition key path is /id — the id filter alone identifies each item.
 * The partitionKeySelector argument is accepted for signature parity but unused.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    final int count = idPartitionKeyPairList.size();
    for (int i = 0; i < count; i++) {
        String paramName = "@param" + i;
        parameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(i).getId()));
        query.append(paramName);
        if (i + 1 < count) {
            query.append(", ");
        }
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds an OR-of-conjunctions query matching each requested (id, partition key)
 * pair: "... WHERE ( (c.id = @p1 AND c[pk] = @p0) OR ... )". Parameters are
 * paired per item: even index holds the partition key, odd index the id.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    final int count = itemIdentities.size();
    for (int i = 0; i < count; i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);
        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(")
             .append("c.id = ")
             .append(idParamName)
             .append(" AND ")
             .append(" c")
             .append(partitionKeySelector)
             .append(" = ")
             .append(pkParamName)
             .append(" )");
        if (i + 1 < count) {
            query.append(" OR ");
        }
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the readMany query for a MULTI_HASH (hierarchical) partition key:
 * each item contributes "(c.id = @idParam AND c.path1 = @p AND c.path2 = @p ...)",
 * joined with OR. Parameter numbering is a single running counter shared by
 * sub-partition-key values and ids.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        // NOTE(review): assumes the partition key's string form joins its hierarchical
        // components with '=' — confirm against PartitionKey serialization.
        String pkValueString = (String) pkValue;
        // Each entry is [partition key path, parameter name] for one hierarchy level.
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;
        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }
        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            // substring(1) drops the leading '/' of the partition key path.
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }
        queryStringBuilder.append(" )");
        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Builds the bracketed partition-key selector used by the readMany queries,
 * e.g. path "/pk" becomes ["pk"]; multiple paths are concatenated.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder selector = new StringBuilder();
    for (String pathPart : partitionKeyDefinition.getPaths()) {
        String withoutLeadingSlash = StringUtils.substring(pathPart, 1);
        // NOTE(review): a double quote inside the path is replaced by a bare backslash
        // (not escaped as \"); behavior preserved as-is — confirm intended escaping.
        String escaped = StringUtils.replace(withoutLeadingSlash, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
/**
 * Executes the per-range readMany queries (ranges that hold multiple requested
 * items). Returns an empty Flux when no range requires a query.
 *
 * @param diagnosticsFactory scoped factory collecting diagnostics for the readMany operation.
 * @param parentResourceLink query link of the parent collection.
 * @param sqlQuery placeholder top-level query; per-range specs come from rangeQueryMap.
 * @param options query request options; may carry an operation context/listener.
 * @param klass resource type of the result pages.
 * @param resourceTypeEnum resource type targeted by the query.
 * @param collection resolved collection the query runs against.
 * @param rangeQueryMap per-partition-key-range query specs built by getRangeQueryMap.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }
    UUID activityId = randomUuid();
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);
    // Each created execution context streams its own feed pages; flatten them.
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Serves the single-item partition ranges of a readMany call via point reads.
 * Each read result (or a swallowed 404/0 "not found") is converted into a
 * single-page FeedResponse so it can be merged with the query results.
 *
 * @param diagnosticsFactory scoped factory collecting diagnostics for the readMany operation.
 * @param singleItemPartitionRequestMap items bucketed by partition key range; only
 *        buckets of exactly one item are point-read here, others are skipped.
 * @param resourceLink link prefix the item id is appended to for the read.
 * @param queryRequestOptions query options converted into per-read RequestOptions.
 * @param klass type the read document is deserialized into.
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {
    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        // A plain 404 (sub-status UNKNOWN) means "item absent" — carry the
                        // exception forward as data; anything else is a real error.
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            // Convert each outcome into a one-page feed response carrying its diagnostics.
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;
            if (cosmosException != null) {
                // Not-found: an empty page that still contributes request statistics.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }
            return Mono.just(feedResponse);
        });
}
/**
 * Queries documents with a raw query string by wrapping it into a
 * {@link SqlQuerySpec} and delegating to the spec-based overload.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Builds an {@link IDocumentQueryClient} facade over this client for the query pipeline.
 * Most members simply delegate to the enclosing RxDocumentClientImpl's caches and policies;
 * executeQueryAsync additionally threads an optional operation listener/context through
 * request execution when one is supplied.
 *
 * NOTE(review): the 'rxDocumentClientImpl' parameter is never used inside the body — all
 * delegation goes through RxDocumentClientImpl.this. Presumably kept for signature
 * compatibility; confirm before removing.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
// Account-level default consistency as reported by the gateway.
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
// Client-configured consistency override (may be null if not set).
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
// With a listener attached: stamp the correlated activity id on the request,
// notify on request/response/error so callers can trace the operation.
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
// Intentionally unimplemented here; returns null. Callers in the query pipeline
// are not expected to invoke this member on this facade.
return null;
}
};
}
/**
 * Queries documents in a collection using a parameterized {@link SqlQuerySpec}.
 * Logs the query text, then hands off to the generic document query pipeline.
 *
 * @param collectionLink link to the collection to query.
 * @param querySpec parameterized query specification.
 * @param state per-operation feed state (options, diagnostics).
 * @param classOfT target item type for deserialization.
 * @return pages of query results.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    // Emit the query text for diagnostics before execution.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Executes a change feed query against the given collection.
 *
 * @param collection the resolved collection; must not be null.
 * @param changeFeedOptions options controlling the change feed (start position, mode, ...).
 * @param classOfT target item type for deserialization.
 * @return pages of change feed results.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    final ChangeFeedQueryImpl<T> changeFeedQuery =
        new ChangeFeedQueryImpl<>(
            this,
            ResourceType.Document,
            classOfT,
            collection.getAltLink(),
            collection.getResourceId(),
            changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Change feed entry point used by the paged-flux surface; extracts the change feed
 * options from the operation state and delegates to {@link #queryDocumentChangeFeed}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    CosmosChangeFeedRequestOptions optionsFromState = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, optionsFromState, classOfT);
}
/**
 * Reads all documents for a single logical partition by turning the read into a
 * partition-scoped query (SELECT by partition key) that is pinned to the one physical
 * partition key range owning that partition key.
 *
 * Flow: validate args -> clone query options -> decide whether a multi-region
 * speculation (availability strategy) applies and, if so, scope diagnostics through a
 * ScopedDiagnosticsFactory -> resolve the collection -> build the logical-partition scan
 * query -> resolve the owning partition key range via the routing map -> run the query
 * pinned to that range, wrapped in an InvalidPartitionExceptionRetryPolicy.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param partitionKey logical partition to read; must not be null.
 * @param state per-operation feed state (options, diagnostics registration).
 * @param classOfT target item type for deserialization.
 * @return pages of all documents in the logical partition.
 * @throws IllegalArgumentException when collectionLink is empty or partitionKey is null.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone so downstream mutation (e.g. setting the partition key range id) does not
// leak into the caller-supplied options.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
DiagnosticsClientContext effectiveClientContext;
ScopedDiagnosticsFactory diagnosticsFactory;
if (orderedApplicableRegionsForSpeculation.size() < 2) {
// No cross-region speculation: use this client directly as the diagnostics context.
effectiveClientContext = this;
diagnosticsFactory = null;
} else {
// Speculation across >= 2 regions: scope diagnostics so partial attempts can be
// reset/merged as the operation state dictates.
diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
effectiveClientContext = diagnosticsFactory;
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
effectiveClientContext,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// Build a query that scans exactly this logical partition.
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries collection-cache refresh when the partition is gone/moved (splits, etc.).
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Map the logical partition key onto the single owning physical range and
// pin the query to that range id.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
effectiveClientContext,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Speculative execution: fold scoped diagnostics back into the request options on
// every outcome (next page, error, cancellation).
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
/**
 * Exposes the client-side cache of partitioned query execution plans, keyed by query.
 * Returned map is the live internal instance (not a copy).
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
return queryPlanCache;
}
/**
 * Reads the partition key ranges feed of a collection.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param state per-operation feed state.
 * @return pages of partition key ranges.
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Reads the partition key ranges feed of a collection (options-based overload).
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param options query request options.
 * @return pages of partition key ranges.
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Builds a service request targeting the stored procedures feed of a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param storedProcedure the stored procedure payload; must not be null.
 * @param options request options (may be null).
 * @param operationType the operation (e.g. Create, Upsert) this request represents.
 * @return the populated service request.
 * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    String sprocsFeedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, sprocsFeedPath, storedProcedure, headers, options);
}
/**
 * Builds a service request targeting the user defined functions feed of a collection.
 *
 * @param collectionLink link to the owning collection; must be non-empty.
 * @param udf the UDF payload; must not be null.
 * @param options request options (may be null).
 * @param operationType the operation this request represents.
 * @return the populated service request.
 * @throws IllegalArgumentException when collectionLink is empty or udf is null.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    String udfsFeedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, udfsFeedPath, udf, headers, options);
}
/**
 * Creates a stored procedure in the given collection, retrying per the session-token
 * reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Create request for a stored procedure, giving the retry policy a chance
 * to inspect the request before it is sent. Synchronous failures are surfaced as
 * error Monos rather than thrown.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure, retrying per the session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Replace request for a stored procedure addressed by its self link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        String targetPath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, targetPath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure by link, retrying per the session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Delete request for a stored procedure addressed by link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String targetPath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, targetPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure by link, retrying per the session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Read request for a stored procedure addressed by link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String targetPath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, targetPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored procedures feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
/**
 * Queries stored procedures using a raw query string; wraps it into a
 * {@link SqlQuerySpec} and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
/**
 * Queries stored procedures in a collection via the generic query pipeline.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with the given parameters, retrying per the
 * session-token reset retry policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch request against a collection, retrying per the
 * session-token reset retry policy.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Issues the ExecuteJavaScript request for a stored procedure.
 *
 * Serializes the parameters (empty string body when none), applies per-request
 * options (excluded regions) and the retry policy hook, resolves partition key
 * information, then executes and captures the session token from the response.
 *
 * @param storedProcedureLink link to the stored procedure to execute.
 * @param options request options (may be null).
 * @param procedureParams positional parameters for the procedure (may be null/empty).
 * @param retryPolicy retry policy to notify before sending (may be null).
 * @return the stored procedure response, or an error Mono on synchronous failure.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        // Body is the serialized parameter array; empty string when there are no params.
        String serializedParams = procedureParams != null && !procedureParams.isEmpty()
            ? RxDocumentClientImpl.serializeProcedureParams(procedureParams)
            : "";
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            serializedParams,
            requestHeaders, options);
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // FIX: operate on the request instance emitted by addPartitionKeyInformation ('req')
        // instead of the captured outer 'request', so that any instance the partition-key
        // resolution step emits is the one actually executed and whose session token is captured.
        return reqObs.flatMap(req -> create(req, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds and executes a transactional batch request, then parses the service
 * response back into a {@link CosmosBatchResponse}. Synchronous failures are
 * surfaced as error Monos rather than thrown.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        Mono<RxDocumentServiceRequest> requestMono =
            getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        return requestMono
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse ->
                BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger in the given collection, retrying per the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Create request for a trigger. Synchronous failures are surfaced as
 * error Monos rather than thrown.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a service request targeting the triggers feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or trigger is null.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
    OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String triggersFeedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, triggersFeedPath,
        trigger, headers, options);
}
/**
 * Replaces an existing trigger, retrying per the session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Replace request for a trigger addressed by its self link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        String targetPath = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, targetPath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger by link, retrying per the session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Delete request for a trigger addressed by link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String targetPath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, targetPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger by link, retrying per the session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Read request for a trigger addressed by link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String targetPath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, targetPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the triggers feed of a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers using a raw query string; wraps it into a {@link SqlQuerySpec}
 * and delegates to the spec-based overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
    QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, querySpec, state);
}
/**
 * Queries triggers in a collection via the generic query pipeline.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user defined function in the given collection, retrying per the
 * session-token reset retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
    UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Create request for a user defined function. Synchronous failures are
 * surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing user defined function, retrying per the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Replace request for a user defined function addressed by its self link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String targetPath = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, targetPath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user defined function by link, retrying per the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Delete request for a user defined function addressed by link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String targetPath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, targetPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user defined function by link, retrying per the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Issues the Read request for a user defined function addressed by link.
 * Synchronous failures are surfaced as error Monos rather than thrown.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String targetPath = Utils.joinPath(udfLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, targetPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
    QueryFeedOperationState state) {
    // Enumerates all UDFs under the given collection via the shared read-feed path.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedLink);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, state);
}
@Override
// Queries UDFs within a collection by delegating to the generic query pipeline.
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads a Conflict resource.
 *
 * Fix: the flatMap continuation previously ignored the request instance emitted by
 * addPartitionKeyInformation ({@code req}) and reused the captured pre-population
 * {@code request}. Operate on {@code req} so the request that is actually sent is
 * guaranteed to be the one carrying the partition-key information.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    // Enumerates all conflicts under the given collection via the shared read-feed path.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedLink = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedLink);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, state);
}
@Override
// Queries conflicts within a collection by delegating to the generic query pipeline.
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes a Conflict resource.
 *
 * Fix: the flatMap continuation previously ignored the request instance emitted by
 * addPartitionKeyInformation ({@code req}) and reused the captured pre-population
 * {@code request}. Operate on {@code req} so the request that is actually sent is
 * guaranteed to be the one carrying the partition-key information.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Creates a User under the given database; argument validation and request
    // construction are delegated to getUserRequest.
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        final RxDocumentServiceRequest createRequest = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(createRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Upserts a User under the given database; the retry policy (when present)
    // is notified before the request is dispatched.
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        final RxDocumentServiceRequest upsertRequest = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(upsertRequest);
        }
        return this.upsert(upsertRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
    OperationType operationType) {
    // Validates inputs then builds a service request targeting the database's
    // users feed for the given operation.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    final String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Replaces an existing User resource addressed via its self link.
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        final String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }
        return this.replace(replaceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a User resource.
 *
 * Consistency fix: annotated with {@code @Override} like every sibling interface
 * method here (readUser, replaceUser, createUser, ...) so the compiler verifies
 * the contract with the interface.
 */
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Deletes a User resource addressed by its link.
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }
        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Reads a single User resource addressed by its link.
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    // Enumerates all users under the given database via the shared read-feed path.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedLink = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedLink);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, state);
}
@Override
// Queries users within a database by delegating to the generic query pipeline.
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
    RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Reads a single client encryption key addressed by its link.
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        final String resourcePath = Utils.joinPath(clientEncryptionKeyLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Creates a client encryption key under the given database; validation and
    // request construction are delegated to getClientEncryptionKeyRequest.
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        final RxDocumentServiceRequest createRequest =
            getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(createRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
    OperationType operationType) {
    // Validates inputs then builds a service request targeting the database's
    // client-encryption-key feed for the given operation.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    final String resourcePath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
    String nameBasedLink,
    RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Replaces an existing client encryption key addressed by its name-based link.
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        final String resourcePath = Utils.joinPath(nameBasedLink, null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }
        return this.replace(replaceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    // Enumerates all client encryption keys under the given database via the
    // shared read-feed path.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedLink);
}
@Override
// Queries client encryption keys within a database by delegating to the generic query pipeline.
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
/**
 * Creates a Permission under the given user.
 *
 * Fix: pass the SAME retry policy instance to both the inner operation and the outer
 * retry wrapper. Previously a second, unrelated policy was created for the wrapper
 * ({@code this.resetSessionTokenRetryPolicy.getRequestPolicy(null)}), so retries were
 * driven by a policy instance that never observed the request via onBeforeSendRequest —
 * inconsistent with every sibling method here (upsertPermission, createUser, ...).
 */
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Creates a Permission under the given user; validation and request
    // construction are delegated to getPermissionRequest.
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        final RxDocumentServiceRequest createRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Create);
        return this.create(createRequest, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
    RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Upserts a Permission under the given user; the retry policy (when present)
    // is notified before the request is dispatched.
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        final RxDocumentServiceRequest upsertRequest =
            getPermissionRequest(userLink, permission, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(upsertRequest);
        }
        return this.upsert(upsertRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
    RequestOptions options, OperationType operationType) {
    // Validates inputs then builds a service request targeting the user's
    // permissions feed for the given operation.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    final String resourcePath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.Permission, resourcePath, permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Replaces an existing Permission addressed via its self link.
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        final String resourcePath = Utils.joinPath(permission.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, resourcePath, permission, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }
        return this.replace(replaceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    // Deletes a Permission addressed by its link.
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        final String resourcePath = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        final RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }
        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    // Reads a single Permission addressed by its link.
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        final String resourcePath = Utils.joinPath(permissionLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    // Enumerates all permissions under the given user via the shared read-feed path.
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String feedLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, feedLink);
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryPermissions(userLink, spec, state);
}
@Override
// Queries permissions under a user by delegating to the generic query pipeline.
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Replaces an Offer addressed via its self link; offers take no custom
    // headers or request options.
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        final String resourcePath = Utils.joinPath(offer.getSelfLink(), null);
        final RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, resourcePath, offer, null, null);
        return this.replace(replaceRequest, documentClientRetryPolicy)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    // Reads a single Offer; the null header/options arguments mirror that offers
    // carry no per-request customization.
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        final String resourcePath = Utils.joinPath(offerLink, null);
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, resourcePath, (HashMap<String, String>) null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    // Enumerates all offers on the account via the shared read-feed path.
    final String feedLink = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, feedLink);
}
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    // Convenience overload: unwrap the query options carried by the operation state.
    final CosmosQueryRequestOptions queryOptions = state.getQueryOptions();
    return nonDocumentReadFeed(queryOptions, resourceType, klass, resourceLink);
}
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    // Acquire a fresh per-operation retry policy shared by the paged feed reader
    // and the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
        retryPolicy);
}
// Core paged ReadFeed implementation for every non-document resource type
// (offers, users, permissions, conflicts, ...). Builds a per-page request
// factory and executor and hands them to the Paginator, which drives the
// continuation-token loop.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
// Normalize null options so downstream accessors never need a null check.
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 signals the service default page size.
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
// Document feeds go through the query pipeline, never through this path.
assert(resourceType != ResourceType.Document);
// Factory invoked once per page: carries the continuation token and page size
// as headers, and notifies the retry policy before each send.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
// Executor: performs the ReadFeed call and materializes the page, using any
// custom item factory registered on the query options.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the spec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryOffers(spec, state);
}
@Override
// Queries account offers by delegating to the generic query pipeline;
// offers are account-scoped, hence the null parent link.
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Acquire a fresh per-operation retry policy shared by the inner call and
    // the outer retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Reads the account metadata; the empty path addresses the account root and
    // no headers or options apply.
    try {
        logger.debug("Getting Database Account");
        final RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(readRequest, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Exposes the session container that tracks session tokens for this client.
// Typed as Object to keep the accessor pair symmetric with setSession.
public Object getSession() {
    return this.sessionContainer;
}
// Replaces the client's session container. The argument is cast to
// SessionContainer; passing any other type throws ClassCastException.
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
@Override
// Accessor for the client's collection metadata cache.
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
@Override
// Accessor for the client's partition key range cache.
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
@Override
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the database account from a specific regional endpoint (used by the endpoint
// manager to probe regions). Also refreshes the multi-write flag from the response.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Route this probe to the supplied endpoint rather than the default one.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
// Multi-write is usable only if both the client policy and the account allow it.
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 * <p>
 * Routing rules, in order: explicit gateway-mode requests; control-plane resource types
 * (Offer, ClientEncryptionKey, scripts except ExecuteJavaScript, PartitionKeyRange,
 * PartitionKey delete); create/upsert/delete of databases, users, collections and
 * permissions; replace/read of collections; queries and read-feeds on collection children
 * without a partition-key-range identity or partition key header (cross-partition, needs
 * gateway fan-out). Everything else goes directly to the store model.
 *
 * @param request the request being routed
 * @return RxStoreModel the gateway proxy or the direct store model
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
if (request.useGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Metadata/control-plane resources are always served by the gateway.
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Cross-partition query/read-feed (no pk-range identity, no partition key header)
// must go through gateway; targeted ones can go direct.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Closes this client exactly once: shuts down the global endpoint manager, the store
 * client factory, the HTTP client, unregisters from the CPU monitor and, when throughput
 * control was enabled, closes the throughput control store. Subsequent calls are no-ops
 * (logged as a warning). Safe to call from init()'s failure path on a partially
 * initialized client.
 */
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // Null check: enableThroughputControlGroup() sets the enabled flag *before*
        // assigning throughputControlStore, so close() (e.g. invoked from init()'s
        // catch block) could otherwise observe flag=true with a null store and NPE.
        if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
// Returns the shared JSON item deserializer used for materializing documents.
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
// Enables a throughput control group, lazily creating the shared ThroughputControlStore
// on first use and wiring it into the direct or gateway store model.
// NOTE(review): the enabled flag is CAS-set to true *before* throughputControlStore is
// assigned; a concurrent reader of the flag (e.g. close()) may briefly observe a null
// store - confirm whether that window needs guarding.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Proactively opens connections and warms caches for the configured containers.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
// Account-level default consistency, as reported by the gateway configuration reader.
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * In direct mode the injector is wired into both the store model and the address
 * resolver; the gateway proxy is always configured (metadata requests go via gateway).
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Lifecycle callbacks delegated to the store model for proactive connection warm-up.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
// Returns the master key or resource token this client authenticates with.
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
// Builds the parameterized query "SELECT * FROM c WHERE c<pkSelector> = @pkValue" used
// to scan a single logical partition; the partition key value is bound as a parameter.
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the physical partition feed ranges (EPK ranges) of the given collection.
 * Wrapped with an InvalidPartitionException retry policy so a stale collection cache
 * triggers a refresh-and-retry instead of failing the caller.
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection, then queries the partition key range cache for all ranges
// covering the full EPK space and converts them to feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
// A null range list signals a stale cache; toFeedRanges then throws
// InvalidPartitionException, which the caller's retry policy handles.
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
// Converts resolved partition key ranges into EPK-based feed ranges. A null list means
// the cached collection metadata is stale: force a name-cache refresh and signal retry.
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>(ranges.size());
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}

// Wraps a partition key range's min/max EPK boundaries as a FeedRange.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom rnd = ThreadLocalRandom.current();
    return randomUuid(rnd.nextLong(), rnd.nextLong());
}

// Stamps RFC 4122 version 4 (bits 12-15 of msb = 0100) and the IETF variant (top two
// bits of lsb = 10) onto the supplied random bits.
static UUID randomUuid(long msb, long lsb) {
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: runs the point operation with this client as the diagnostics
// context factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
/**
 * Executes a document point operation under the threshold-based availability strategy:
 * the operation starts in the first applicable region and is speculatively hedged into
 * each additional region after threshold + (step * regionIndex-1). The first
 * non-transient outcome (success, or a terminal error per isNonTransientResultForHedging)
 * wins via Mono.firstWithValue; all diagnostics are merged exactly once.
 *
 * @param resourceType must be Document (enforced below)
 * @param operationType the point operation type
 * @param callback the per-region operation to execute
 * @param initialRequestOptions caller options; may be null
 * @param idempotentWriteRetriesEnabled whether write operations may be hedged safely
 * @param innerDiagnosticsFactory diagnostics context the scoped factory wraps
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two applicable regions -> hedging impossible; run once directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First mono: free to use all regions; any CosmosException (even transient)
// is wrapped so cross-region retries inside the callback keep running.
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged monos: pin to exactly this region by excluding all other applicable
// regions, and only surface non-transient errors (transient ones stay pending
// so another region can win).
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger the hedge: threshold for the 2nd region, +step for each further one.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
// Merge diagnostics before unwrapping the winning result or error.
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when every mono completed
// empty/errored; surface the first real CosmosException found among them.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the unwrapped (reactive-composite) throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}

// True when the unwrapped throwable is a CosmosException whose status/sub-status marks
// it as non-transient for hedging (no point waiting for other regions to answer).
private static boolean isNonTransientCosmosException(Throwable t) {
    Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
// Builds the exclusion list for a hedged request targeting currentRegion: the caller's
// original exclusions plus every other applicable region, so this attempt is pinned
// to exactly one region.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    List<String> effectiveExcludedRegions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();
    applicableRegions
        .stream()
        .filter(region -> !region.equals(currentRegion))
        .forEach(effectiveExcludedRegions::add);
    return effectiveExcludedRegions;
}
// Classifies a status/sub-status pair as "non-transient" for hedging: a result that
// would be identical in every region, so the first region to produce it wins outright.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Anything below 400 is a success - definitely final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // Client-side operation timeout means the end-to-end budget is exhausted.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
        && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
        return true;
    }
    // Deterministic client errors that would fail identically in every region.
    boolean isDeterministicClientError =
        statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;
    if (isDeterministicClientError) {
        return true;
    }
    // Plain 404 (no sub-status) is a definitive "not found".
    return statusCode == HttpConstants.StatusCodes.NOTFOUND
        && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
}
// Prefers the per-call diagnostics context override; falls back to this client.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by the preference list (if any), with excluded
 * regions filtered out by the endpoint manager.
 *
 * @param operationType the operation type; read-only vs. write selects the endpoint set
 * @param excludedRegions regions to exclude; may be null
 * @return the applicable endpoints, or an empty list for operations that are neither read nor write
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
/**
 * Removes null entries from the given endpoint list in place.
 *
 * @param orderedEffectiveEndpointsList the list to scrub; may be null
 * @return the same list instance with nulls removed, or the shared empty list when the input is null
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // Single O(n) pass; the previous index-based remove(i) loop was O(n^2) on
    // ArrayList when multiple nulls were present. Same in-place semantics.
    orderedEffectiveEndpointsList.removeIf(uri -> uri == null);
    return orderedEffectiveEndpointsList;
}
// Overload taking RequestOptions: delegates using the options' excluded-region list.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
/**
 * Determines the ordered list of regions eligible for speculative (hedged) execution.
 * Returns an empty list (hedging disabled) unless ALL of the following hold: an enabled
 * end-to-end latency policy with a threshold-based availability strategy, a Document
 * resource, and - for writes - idempotent write retries plus multi-write account support.
 */
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Lower-case the exclusions once for the case-insensitive comparison below.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
endpoints.forEach(uri -> {
// NOTE(review): assumes getRegionName(...) never returns null for an applicable
// endpoint - confirm; otherwise the toLowerCase call below would NPE.
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
/**
 * Executes a feed/query operation under the threshold-based availability strategy,
 * mirroring the point-operation variant: start in the first applicable region, hedge into
 * each additional region after a staggered delay, and let the first non-transient
 * outcome win via Mono.firstWithValue. Requests are cloned per region so per-attempt
 * exclude-region lists do not leak into the caller's request.
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Fewer than two applicable regions -> hedging impossible; run once directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// First mono: free to use all regions; any CosmosException is wrapped so
// cross-region retries inside the operation keep running.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged monos: pin to this region by excluding all other applicable regions,
// and only surface non-transient errors.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger the hedge: threshold for the 2nd region, +step for each further one.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when every mono completed
// empty/errored; surface the first real CosmosException found among them.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
// Abstraction over a single document point operation so the availability strategy can
// re-invoke it per region with cloned request options and a scoped diagnostics context.
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Either-style wrapper for hedged point operations: exactly one of response/exception is
// non-null, letting Mono.firstWithValue short-circuit on the first non-transient outcome.
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    // True when this result carries a terminal error instead of a response.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
// Generic either-style wrapper for hedged feed/query operations: exactly one of
// response/exception is non-null (see NonTransientPointOperationResult for the
// point-operation analogue).
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    // True when this result carries a terminal error instead of a response.
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
// Diagnostics factory that scopes every CosmosDiagnostics created during a (possibly
// hedged) operation so they can later be merged - exactly once - into the surviving
// operation's diagnostics context.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Track every diagnostics instance handed out so merge() can attach them later.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }

    // Merges using the diagnostics context snapshot from the request options, if present.
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            // Read the snapshot once and reuse it; the original called
            // getDiagnosticsContextSnapshot() twice (redundant, and inconsistent if
            // the snapshot changed between the two reads).
            CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
            if (ctxSnapshot != null) {
                knownCtx = ctxSnapshot;
            }
        }
        merge(knownCtx);
    }

    // Attaches all context-less, non-empty diagnostics to the given context (or to the
    // first context found among the created diagnostics). Idempotent via isMerged.
    public void merge(CosmosDiagnosticsContext knownCtx) {
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            return;
        }
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as captured so the paged flux does not emit them again.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    // Clears tracked diagnostics and re-arms the factory for reuse.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} |
In general, the client-setting overrides the account default, and RequestOptions overrides the client-setting | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
    // ---- Static shared state (process-wide, across all client instances) ----
    private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
    private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
    // Cached bridge accessors so hot paths do not re-fetch them per call.
    private final static
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
        ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
    private final static
    ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
        ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
    private final static
    ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
        ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
    private final static
    ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
        ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
    // Machine identity and per-process client bookkeeping (used by diagnostics).
    private static final String tempMachineId = "uuid:" + UUID.randomUUID();
    private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
    private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
    private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
    // Full EPK range: minimum inclusive, maximum exclusive.
    private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
        PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
        PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
    // Placeholder query text used only to construct a parallel query execution context.
    private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
        "ParallelDocumentQueryExecutioncontext, but not used";
    private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
    private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
    private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);

    // ---- Authentication / credentials ----
    private final String masterKeyOrResourceToken;
    private final URI serviceEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final ConsistencyLevel consistencyLevel;
    private final BaseAuthorizationTokenProvider authorizationTokenProvider;
    private final UserAgentContainer userAgentContainer;
    private final boolean hasAuthKeyResourceToken;
    private final Configs configs;
    private final boolean connectionSharingAcrossClientsEnabled;
    private AzureKeyCredential credential;
    private final TokenCredential tokenCredential;
    private String[] tokenCredentialScopes;
    private SimpleTokenCache tokenCredentialCache;
    private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
    AuthorizationTokenType authorizationTokenType;

    // ---- Session / caches / transport (initialized during construction or init) ----
    private SessionContainer sessionContainer;
    private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
    private RxClientCollectionCache collectionCache;
    private RxGatewayStoreModel gatewayProxy;
    private RxStoreModel storeModel;
    private GlobalAddressResolver addressResolver;
    private RxPartitionKeyRangeCache partitionKeyRangeCache;
    // resource id / full name -> (partition key, resource token) pairs, built from a permission feed.
    private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
    private final boolean contentResponseOnWriteEnabled;
    private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final int clientId;
    private ClientTelemetry clientTelemetry;
    private final ApiType apiType;
    private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
    private IRetryPolicyFactory resetSessionTokenRetryPolicy;
    /**
     * Compatibility mode: Allows to specify compatibility mode used by client when
     * making query requests. Should be removed when application/sql is no longer
     * supported.
     */
    private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
    private final GlobalEndpointManager globalEndpointManager;
    private final RetryPolicy retryPolicy;
    private HttpClient reactorHttpClient;
    private Function<HttpClient, HttpClient> httpClientInterceptor;
    private volatile boolean useMultipleWriteLocations;
    private StoreClientFactory storeClientFactory;
    private GatewayServiceConfigurationReader gatewayConfigurationReader;
    private final DiagnosticsClientConfig diagnosticsClientConfig;
    private final AtomicBoolean throughputControlEnabled;
    private ThroughputControlStore throughputControlStore;
    private final CosmosClientTelemetryConfig clientTelemetryConfig;
    private final String clientCorrelationId;
    private final SessionRetryOptions sessionRetryOptions;
    private final boolean sessionCapturingOverrideEnabled;
    /**
     * Creates a client authenticated via master key / resource token, a permission feed,
     * an {@link AzureKeyCredential}, or a custom {@link CosmosAuthorizationTokenResolver}.
     * Delegates to the credential-based constructor with a null {@link TokenCredential},
     * then installs the supplied token resolver.
     */
    public RxDocumentClientImpl(URI serviceEndpoint,
                                String masterKeyOrResourceToken,
                                List<Permission> permissionFeed,
                                ConnectionPolicy connectionPolicy,
                                ConsistencyLevel consistencyLevel,
                                Configs configs,
                                CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                                AzureKeyCredential credential,
                                boolean sessionCapturingOverride,
                                boolean connectionSharingAcrossClientsEnabled,
                                boolean contentResponseOnWriteEnabled,
                                CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                                ApiType apiType,
                                CosmosClientTelemetryConfig clientTelemetryConfig,
                                String clientCorrelationId,
                                CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                                SessionRetryOptions sessionRetryOptions,
                                CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
        this(
            serviceEndpoint,
            masterKeyOrResourceToken,
            permissionFeed,
            connectionPolicy,
            consistencyLevel,
            configs,
            credential,
            null, // this overload carries no TokenCredential
            sessionCapturingOverride,
            connectionSharingAcrossClientsEnabled,
            contentResponseOnWriteEnabled,
            metadataCachesSnapshot,
            apiType,
            clientTelemetryConfig,
            clientCorrelationId,
            cosmosEndToEndOperationLatencyPolicyConfig,
            sessionRetryOptions,
            containerProactiveInitConfig);
        this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
    }
    /**
     * Creates a client that may additionally authenticate via an AAD {@link TokenCredential}
     * or a custom {@link CosmosAuthorizationTokenResolver}.
     * Delegates to the private permission-feed constructor, then installs the token resolver.
     */
    public RxDocumentClientImpl(URI serviceEndpoint,
                                String masterKeyOrResourceToken,
                                List<Permission> permissionFeed,
                                ConnectionPolicy connectionPolicy,
                                ConsistencyLevel consistencyLevel,
                                Configs configs,
                                CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                                AzureKeyCredential credential,
                                TokenCredential tokenCredential,
                                boolean sessionCapturingOverride,
                                boolean connectionSharingAcrossClientsEnabled,
                                boolean contentResponseOnWriteEnabled,
                                CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                                ApiType apiType,
                                CosmosClientTelemetryConfig clientTelemetryConfig,
                                String clientCorrelationId,
                                CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                                SessionRetryOptions sessionRetryOptions,
                                CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
        this(
            serviceEndpoint,
            masterKeyOrResourceToken,
            permissionFeed,
            connectionPolicy,
            consistencyLevel,
            configs,
            credential,
            tokenCredential,
            sessionCapturingOverride,
            connectionSharingAcrossClientsEnabled,
            contentResponseOnWriteEnabled,
            metadataCachesSnapshot,
            apiType,
            clientTelemetryConfig,
            clientCorrelationId,
            cosmosEndToEndOperationLatencyPolicyConfig,
            sessionRetryOptions,
            containerProactiveInitConfig);
        this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
    }
private RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverrideEnabled,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
if (permissionFeed != null && permissionFeed.size() > 0) {
this.resourceTokensMap = new HashMap<>();
for (Permission permission : permissionFeed) {
String[] segments = StringUtils.split(permission.getResourceLink(),
Constants.Properties.PATH_SEPARATOR.charAt(0));
if (segments.length == 0) {
throw new IllegalArgumentException("resourceLink");
}
List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
throw new IllegalArgumentException(permission.getResourceLink());
}
partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
if (partitionKeyAndResourceTokenPairs == null) {
partitionKeyAndResourceTokenPairs = new ArrayList<>();
this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
}
PartitionKey partitionKey = permission.getResourcePartitionKey();
partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
permission.getToken()));
logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
}
if(this.resourceTokensMap.isEmpty()) {
throw new IllegalArgumentException("permissionFeed");
}
String firstToken = permissionFeed.get(0).getToken();
if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
this.firstResourceTokenFromPermissionFeed = firstToken;
}
}
}
    /**
     * Core constructor: wires diagnostics config, auth mode selection, session container,
     * HTTP client, global endpoint manager and retry policy. Statement order matters here
     * (e.g. the HTTP client must exist before the endpoint manager); do not reorder casually.
     * On any failure the partially-constructed client is closed before rethrowing.
     */
    RxDocumentClientImpl(URI serviceEndpoint,
                         String masterKeyOrResourceToken,
                         ConnectionPolicy connectionPolicy,
                         ConsistencyLevel consistencyLevel,
                         Configs configs,
                         AzureKeyCredential credential,
                         TokenCredential tokenCredential,
                         boolean sessionCapturingOverrideEnabled,
                         boolean connectionSharingAcrossClientsEnabled,
                         boolean contentResponseOnWriteEnabled,
                         CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                         ApiType apiType,
                         CosmosClientTelemetryConfig clientTelemetryConfig,
                         String clientCorrelationId,
                         CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                         SessionRetryOptions sessionRetryOptions,
                         CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
        assert(clientTelemetryConfig != null);
        Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
            .CosmosClientTelemetryConfigHelper
            .getCosmosClientTelemetryConfigAccessor()
            .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
        assert(clientTelemetryEnabled != null);
        // Per-process bookkeeping: active-client counter, unique client id, per-endpoint count.
        activeClientsCnt.incrementAndGet();
        this.clientId = clientIdGenerator.incrementAndGet();
        // Fall back to a zero-padded client id when no correlation id was supplied.
        this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
            String.format("%05d",this.clientId): clientCorrelationId;
        clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
        this.diagnosticsClientConfig = new DiagnosticsClientConfig();
        this.diagnosticsClientConfig.withClientId(this.clientId);
        this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
        this.diagnosticsClientConfig.withClientMap(clientMap);
        this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
        this.diagnosticsClientConfig.withConsistency(consistencyLevel);
        this.throughputControlEnabled = new AtomicBoolean(false);
        this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
        this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
        this.sessionRetryOptions = sessionRetryOptions;
        logger.info(
            "Initializing DocumentClient [{}] with"
                + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
            this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
        try {
            this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
            this.configs = configs;
            this.masterKeyOrResourceToken = masterKeyOrResourceToken;
            this.serviceEndpoint = serviceEndpoint;
            this.credential = credential;
            this.tokenCredential = tokenCredential;
            this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
            this.authorizationTokenType = AuthorizationTokenType.Invalid;
            // Select the auth mode: key credential > resource token > master key string > AAD token.
            if (this.credential != null) {
                hasAuthKeyResourceToken = false;
                this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
                this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
            } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
                this.authorizationTokenProvider = null;
                hasAuthKeyResourceToken = true;
                this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
            } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
                // Plain master key string: wrap it in an AzureKeyCredential.
                this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
                hasAuthKeyResourceToken = false;
                this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
                this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
            } else {
                hasAuthKeyResourceToken = false;
                this.authorizationTokenProvider = null;
                if (tokenCredential != null) {
                    this.tokenCredentialScopes = new String[] {
                        // NOTE(review): the scope literal below appears truncated in this copy
                        // (likely "<scheme>://<host>/.default"); verify against the upstream source.
                        serviceEndpoint.getScheme() + ":
                    };
                    // Cache AAD tokens so each request does not hit the identity provider.
                    this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                        .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                    this.authorizationTokenType = AuthorizationTokenType.AadToken;
                }
            }
            if (connectionPolicy != null) {
                this.connectionPolicy = connectionPolicy;
            } else {
                this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
            }
            this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
            this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
            this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
            this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
            this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
            this.diagnosticsClientConfig.withMachineId(tempMachineId);
            this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
            this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
            // Session capturing is only needed for SESSION consistency unless explicitly overridden.
            boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
            this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
            this.consistencyLevel = consistencyLevel;
            this.userAgentContainer = new UserAgentContainer();
            String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
            if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
                userAgentContainer.setSuffix(userAgentSuffix);
            }
            this.httpClientInterceptor = null;
            this.reactorHttpClient = httpClient();
            this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
            this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
            this.resetSessionTokenRetryPolicy = retryPolicy;
            CpuMemoryMonitor.register(this);
            this.queryPlanCache = new ConcurrentHashMap<>();
            this.apiType = apiType;
            this.clientTelemetryConfig = clientTelemetryConfig;
        } catch (RuntimeException e) {
            // Release whatever was already acquired (counters, HTTP client, monitor registration).
            logger.error("unexpected failure in initializing client.", e);
            close();
            throw e;
        }
    }
@Override
public DiagnosticsClientConfig getConfig() {
return diagnosticsClientConfig;
}
    /**
     * Creates a fresh CosmosDiagnostics instance for one operation, applying the
     * sampling rate configured in this client's telemetry config.
     */
    @Override
    public CosmosDiagnostics createDiagnostics() {
        return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
    }
    /**
     * Reads the database account from the gateway and derives whether multi-write
     * locations can be used. Fails fast when the account could not be fetched
     * (unreachable endpoint or invalid auth token).
     * NOTE(review): the "More info: https:" literals below appear truncated in this
     * copy (a troubleshooting URL was presumably cut off); verify against upstream.
     */
    private void initializeGatewayConfigurationReader() {
        this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
        DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
        // A null account means initialization (endpoint discovery / auth) failed.
        if (databaseAccount == null) {
            logger.error("Client initialization failed."
                + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
            throw new RuntimeException("Client initialization failed."
                + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
        }
        // Multi-write requires both the client policy and the account to allow it.
        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
    }
private void updateGatewayProxy() {
(this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
(this.gatewayProxy).setCollectionCache(this.collectionCache);
(this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
(this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
    /**
     * Serializes this client's collection cache into the given metadata snapshot,
     * so a later client can be constructed with a warm cache.
     */
    public void serialize(CosmosClientMetadataCachesSnapshot state) {
        RxCollectionCache.serialize(state, this.collectionCache);
    }
    /**
     * Wires up direct (TCP) connectivity: builds the global address resolver and the
     * store client factory, then creates the server store model. The resolver must
     * exist before the factory, which must exist before createStoreModel.
     */
    private void initializeDirectConnectivity() {
        this.addressResolver = new GlobalAddressResolver(this,
            this.reactorHttpClient,
            this.globalEndpointManager,
            this.configs.getProtocol(),
            this,
            this.collectionCache,
            this.partitionKeyRangeCache,
            userAgentContainer,
            null,
            this.connectionPolicy,
            this.apiType);
        this.storeClientFactory = new StoreClientFactory(
            this.addressResolver,
            this.diagnosticsClientConfig,
            this.configs,
            this.connectionPolicy,
            this.userAgentContainer,
            this.connectionSharingAcrossClientsEnabled,
            this.clientTelemetry,
            this.globalEndpointManager);
        this.createStoreModel(true);
    }
    /**
     * Adapts this client to the DatabaseAccountManagerInternal interface consumed by
     * the GlobalEndpointManager; all calls delegate back to this client instance.
     */
    DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
        return new DatabaseAccountManagerInternal() {
            @Override
            public URI getServiceEndpoint() {
                return RxDocumentClientImpl.this.getServiceEndpoint();
            }
            @Override
            public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
                logger.info("Getting database account endpoint from {}", endpoint);
                return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
            }
            @Override
            public ConnectionPolicy getConnectionPolicy() {
                return RxDocumentClientImpl.this.getConnectionPolicy();
            }
        };
    }
    /**
     * Factory for the gateway store model; package-visible so tests can substitute
     * their own implementation. Simply forwards all collaborators to the constructor.
     */
    RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                             ConsistencyLevel consistencyLevel,
                                             QueryCompatibilityMode queryCompatibilityMode,
                                             UserAgentContainer userAgentContainer,
                                             GlobalEndpointManager globalEndpointManager,
                                             HttpClient httpClient,
                                             ApiType apiType) {
        return new RxGatewayStoreModel(
            this,
            sessionContainer,
            consistencyLevel,
            queryCompatibilityMode,
            userAgentContainer,
            globalEndpointManager,
            httpClient,
            apiType);
    }
private HttpClient httpClient() {
HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
.withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
.withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
.withProxy(this.connectionPolicy.getProxy())
.withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());
if (connectionSharingAcrossClientsEnabled) {
return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
} else {
diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
return HttpClient.createFixed(httpClientConfig);
}
}
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
    /** Returns the service endpoint URI this client was created against. */
    @Override
    public URI getServiceEndpoint() {
        return this.serviceEndpoint;
    }
    /** Returns the effective connection policy (defaulted at construction if none was given). */
    @Override
    public ConnectionPolicy getConnectionPolicy() {
        return this.connectionPolicy;
    }
@Override
public boolean isContentResponseOnWriteEnabled() {
return contentResponseOnWriteEnabled;
}
@Override
public ConsistencyLevel getConsistencyLevel() {
return consistencyLevel;
}
    /** Returns the client telemetry component (created during init). */
    @Override
    public ClientTelemetry getClientTelemetry() {
        return this.clientTelemetry;
    }
    /** Returns the correlation id (user-supplied, or zero-padded client id fallback). */
    @Override
    public String getClientCorrelationId() {
        return this.clientCorrelationId;
    }
@Override
public String getMachineId() {
if (this.diagnosticsClientConfig == null) {
return null;
}
return this.diagnosticsClientConfig.getMachineId();
}
    /** Returns the full user-agent string (base SDK agent plus any configured suffix). */
    @Override
    public String getUserAgent() {
        return this.userAgentContainer.getUserAgent();
    }
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
    /**
     * Builds and issues the Create-Database request. Serialization is timed so the
     * cost can be attached to the request's diagnostics; keep the Instant captures
     * directly around the serialize call.
     *
     * @throws IllegalArgumentException (as Mono.error) when database is null.
     */
    private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (database == null) {
                throw new IllegalArgumentException("Database");
            }
            logger.debug("Creating a Database. id: [{}]", database.getId());
            validateResource(database);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
            // Time the payload serialization for diagnostics.
            Instant serializationStartTimeUTC = Instant.now();
            ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
            Instant serializationEndTimeUTC = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }
            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, Database.class));
        } catch (Exception e) {
            // Surface synchronous failures through the reactive channel.
            logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
    /** Reads all databases in the account as a non-document feed from the service root. */
    @Override
    public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
        return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
    }
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
case ClientEncryptionKey:
return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
case Conflict:
return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
default:
throw new IllegalArgumentException("resource type not supported");
}
}
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
if (options == null) {
return null;
}
return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
if (options == null) {
return null;
}
return options.getOperationContextAndListenerTuple();
}
    /**
     * Convenience overload: runs the query using this client itself as the
     * diagnostics factory.
     */
    private <T> Flux<FeedResponse<T>> createQuery(
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        QueryFeedOperationState state,
        Class<T> klass,
        ResourceType resourceTypeEnum) {
        return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
    }
    /**
     * Sets up a cross-partition query: resolves the feed link, picks the correlation
     * activity id, wraps execution in an invalid-partition retry policy, and scopes
     * diagnostics through a ScopedDiagnosticsFactory so that partial diagnostics are
     * merged into the operation state on success, error AND cancellation. The three
     * merge call sites (flatMap / onErrorMap / doOnCancel) are intentional — do not
     * remove any of them.
     */
    private <T> Flux<FeedResponse<T>> createQuery(
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        QueryFeedOperationState state,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DiagnosticsClientContext innerDiagnosticsFactory) {

        String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
        CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
        // Prefer a caller-supplied correlation id; otherwise generate one for this query.
        UUID correlationActivityIdOfRequestOptions = qryOptAccessor
            .getCorrelationActivityId(nonNullQueryOptions);
        UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
            correlationActivityIdOfRequestOptions : randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
        // Retries when a partition was split/merged between plan and execution.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
        state.registerDiagnosticsFactory(
            diagnosticsFactory::reset,
            diagnosticsFactory::merge);
        return
            ObservableHelper.fluxInlineIfPossibleAsObs(
                () -> createQueryInternal(
                    diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
                invalidPartitionExceptionRetryPolicy
            ).flatMap(result -> {
                diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
    }
    /**
     * Executes the prepared query pipeline: builds the execution context, annotates
     * the first page with query-plan diagnostics, applies SELECT VALUE handling, and
     * optionally wraps the stream with an end-to-end operation timeout.
     * Concurrency is capped via Queues.SMALL_BUFFER_SIZE with prefetch 1.
     */
    private <T> Flux<FeedResponse<T>> createQueryInternal(
        DiagnosticsClientContext diagnosticsClientContext,
        String resourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        IDocumentQueryClient queryClient,
        UUID activityId,
        final AtomicBoolean isQueryCancelledOnTimeout) {

        Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory
                .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                    options, resourceLink, false, activityId,
                    Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);

        // Query-plan diagnostics are only attached to the first emitted page.
        AtomicBoolean isFirstResponse = new AtomicBoolean(true);
        return executionContext.flatMap(iDocumentQueryExecutionContext -> {
            QueryInfo queryInfo = null;
            // Only pipelined contexts expose QueryInfo (SELECT VALUE / plan details).
            if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
                queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
            }
            QueryInfo finalQueryInfo = queryInfo;
            Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
                .map(tFeedResponse -> {
                    if (finalQueryInfo != null) {
                        if (finalQueryInfo.hasSelectValue()) {
                            ModelBridgeInternal
                                .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                        }
                        if (isFirstResponse.compareAndSet(true, false)) {
                            ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                                finalQueryInfo.getQueryPlanDiagnosticsContext());
                        }
                    }
                    return tFeedResponse;
                });
            RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .toRequestOptions(options);
            CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
                getEndToEndOperationLatencyPolicyConfig(requestOptions);
            // Apply the end-to-end timeout only when the policy is present and enabled.
            if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
                return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
            }
            return feedResponseFlux;
        }, Queues.SMALL_BUFFER_SIZE, 1);
    }
/**
 * Attaches the aggregated diagnostics of all requests that were cancelled
 * (e.g. by the end-to-end timeout policy) to the given exception, so callers
 * see a single merged {@link CosmosDiagnostics} instead of losing them.
 *
 * @param requestOptions the query options tracking the cancelled-request diagnostics.
 * @param exception the exception that will carry the merged diagnostics.
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregatedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);

                    // BUGFIX: the statistics to merge in must be read from 'toBeMerged',
                    // not from 'first' a second time - otherwise the merge was a no-op
                    // and every cancelled request's diagnostics except the first were lost.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);

                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get();

        BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
    }
}
/**
 * Wraps the feed-response flux with the configured end-to-end operation timeout.
 * On timeout the reactor {@link TimeoutException} is translated into a
 * {@link CosmosException}: a negative-timeout exception when the configured
 * timeout is negative, an {@link OperationCancelledException} otherwise.
 * In both cases the cancellation flag is set and any diagnostics from the
 * cancelled requests are merged onto the mapped exception.
 *
 * @param feedResponseFlux the source flux of feed responses.
 * @param endToEndPolicyConfig the end-to-end latency policy supplying the timeout.
 * @param requestOptions the query options (used to fetch cancelled-request diagnostics).
 * @param isQueryCancelledOnTimeout flag flipped to {@code true} when a timeout fires.
 * @return the flux with timeout and exception mapping applied.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout) {

    final Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    final boolean hasNegativeTimeout = endToEndTimeout.isNegative();

    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (!(throwable instanceof TimeoutException)) {
                return throwable;
            }

            // Negative timeouts get a dedicated exception type; everything else
            // maps to a plain operation-cancelled exception.
            final CosmosException cancellationException = hasNegativeTimeout
                ? getNegativeTimeoutException(null, endToEndTimeout)
                : new OperationCancelledException();
            cancellationException.setStackTrace(throwable.getStackTrace());
            isQueryCancelledOnTimeout.set(true);
            applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
            return cancellationException;
        });
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and delegate to the typed overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryDatabases(querySpec, state);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Databases are queried against the root databases feed.
    return this.createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    // Obtain a fresh per-operation retry policy, then run the internal create
    // under it so session-token resets and retries are handled transparently.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a document collection under the given database link.
 * Serialization time is captured and attached to the request diagnostics,
 * and the returned session token is recorded in the session container.
 *
 * @param databaseLink the link of the owning database; must be non-empty.
 * @param collection the collection definition to create; must be non-null.
 * @param options optional request options (headers, consistency, throughput...).
 * @param retryPolicyInstance the per-operation retry policy, may be null.
 * @return a Mono emitting the service response, or an error Mono on invalid input.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);

        // Time the payload serialization so it can be surfaced in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // CONSISTENCY/ROBUSTNESS FIX: guard against a null resource before
                // recording the session token - same guard replaceCollectionInternal
                // already has; avoids an NPE on minimal-content responses.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    // Run the internal replace under a fresh per-operation retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing document collection (addressed by its self-link).
 * Captures serialization timing into request diagnostics and records the
 * response's session token when a resource body is returned.
 *
 * @param collection the updated collection definition; must be non-null.
 * @param options optional request options.
 * @param retryPolicyInstance the per-operation retry policy, may be null.
 * @return a Mono emitting the service response, or an error Mono on invalid input.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);

        // Time the payload serialization so it can be surfaced in diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Resource can be null (e.g. minimal-content responses); only record
                // the session token when a body was returned.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    // Run the internal delete under a fresh per-operation retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the collection addressed by {@code collectionLink}.
 *
 * @param collectionLink the link of the collection to delete; must be non-empty.
 * @param options optional request options.
 * @param retryPolicyInstance the per-operation retry policy, may be null.
 * @return a Mono emitting the service response, or an error Mono on invalid input.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);

        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, headers, options);

        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Sends a DELETE request through the store model; headers (auth, date,
 * feed-range filtering) are populated lazily on subscription.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // If this attempt is a retry, mark the end of the retry wait in the context.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest).processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends a delete-all-items-by-partition-key request (a POST) through the store model.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel resolvedStoreModel = this.getStoreProxy(populatedRequest);

            // If this attempt is a retry, mark the end of the retry wait in the context.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return resolvedStoreModel.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Sends a GET request through the store model; headers are populated lazily
 * on subscription.
 */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // If this attempt is a retry, mark the end of the retry wait in the context.
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Sends a read-feed (GET) request through the store model.
 */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> getStoreProxy(populatedRequest).processMessage(populatedRequest));
}
/**
 * Sends a query (POST) request through the store model and records the
 * response's session token for session consistency.
 */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel resolvedStoreModel = this.getStoreProxy(populatedRequest);
            return resolvedStoreModel.processMessage(populatedRequest)
                .map(response -> {
                    // Capture the session token before handing the response back.
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    // Run the internal read under a fresh per-operation retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the collection addressed by {@code collectionLink}.
 *
 * @param collectionLink the link of the collection to read; must be non-empty.
 * @param options optional request options.
 * @param retryPolicyInstance the per-operation retry policy, may be null.
 * @return a Mono emitting the service response, or an error Mono on invalid input.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);

        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, headers, options);

        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }

    // Collections are enumerated via the database's collections feed path.
    final String collectionsPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsPath);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload's logic.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
    SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Collections are queried under the owning database's link.
    return this.createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal,
 * e.g. {@code [p0,p1,...]}. JsonSerializable values use the model bridge;
 * everything else goes through the shared object mapper.
 *
 * @param objectArray the parameter values to serialize.
 * @return the JSON array string.
 * @throws IllegalArgumentException when a value cannot be serialized to JSON.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    final int count = objectArray.size();
    final String[] serialized = new String[count];

    for (int i = 0; i < count; ++i) {
        final Object value = objectArray.get(i);
        if (value instanceof JsonSerializable) {
            serialized[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) value);
        } else {
            try {
                serialized[i] = mapper.writeValueAsString(value);
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }

    return String.format("[%s]", StringUtils.join(serialized, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the HTTP headers for a request from client-level defaults and the
 * per-request {@link RequestOptions}. Per-request values override client
 * defaults (e.g. consistency level, content-response-on-write).
 *
 * @param options per-request options; may be null (client defaults apply).
 * @param resourceType the resource the request targets.
 * @param operationType the operation being performed.
 * @return the populated header map (never null).
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();

    // Client-level defaults first; option-derived headers may override below.
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }

    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }

    if (options == null) {
        // No per-request options: only the minimal-content preference can still apply.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }

    // Caller-supplied custom headers are copied verbatim (and may be overridden below).
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }

    // Per-request content-response-on-write overrides the client default.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }

    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }

    // Optimistic-concurrency etags.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }

    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }

    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }

    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }

    // Pre/post triggers are sent as comma-separated lists.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }

    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }

    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }

    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }

    // Throughput: explicit offer throughput wins over a named offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }

    // ThroughputProperties path (manual vs autoscale) - only when no explicit
    // offer throughput was set above.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;

            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }

            // Mixing a fixed offer with autoscale settings is invalid.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }

            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }

    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }

    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }

    // Dedicated gateway (integrated cache) options.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }

    return headers;
}
/**
 * Returns the factory used to build per-operation retry policies that reset
 * the session token when needed.
 *
 * @return the retry policy factory.
 */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection (for its partition key definition) and then
 * stamps the partition key onto the request.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Document document,
    RequestOptions options) {

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);

    return collectionObs.map(collectionHolder -> {
        this.addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
        return request;
    });
}
/**
 * Stamps the partition key onto the request once the caller-supplied
 * collection lookup resolves.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object document,
    RequestOptions options,
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {

    return collectionObs.map(collectionHolder -> {
        this.addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionHolder.v);
        return request;
    });
}
/**
 * Derives the effective partition key for the request and writes it both to
 * the request object and to the x-ms-documentdb-partitionkey header.
 *
 * Resolution order: explicit PartitionKey.NONE in options, explicit partition
 * key in options, empty key for non-partitioned collections, then extraction
 * from the document payload.
 *
 * @param request the request being prepared.
 * @param contentAsByteBuffer the serialized document, may be null when objectDoc is set.
 * @param objectDoc the document object, may be null when contentAsByteBuffer is set.
 * @param options request options that may carry an explicit partition key.
 * @param collection the resolved collection providing the partition key definition.
 * @throws UnsupportedOperationException when no partition key can be determined.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
    ByteBuffer contentAsByteBuffer,
    Object objectDoc, RequestOptions options,
    DocumentCollection collection) {
    PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();

    PartitionKeyInternal partitionKeyInternal = null;
    if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
        // Caller explicitly targeted the "none" partition.
        partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
    } else if (options != null && options.getPartitionKey() != null) {
        // Caller supplied the partition key directly.
        partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Collection is not partitioned.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the partition key from the document payload.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            // Rewind so deserialization starts from the beginning of the buffer.
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }

        // Time the extraction so it can be surfaced in diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal =  PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTime,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
        );
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }

    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the service request for a document write (create/upsert/replace-style
 * operations routed through {@code operationType}), serializing the payload,
 * recording serialization diagnostics, and resolving + stamping the partition key.
 *
 * @param requestRetryPolicy per-operation retry policy, may be null.
 * @param documentCollectionLink the target collection link; must be non-empty.
 * @param document the document payload; must be non-null.
 * @param options request options; may carry a tracking id and excluded regions.
 * @param disableAutomaticIdGeneration currently unused here; id generation is
 *        handled by the serializer - TODO confirm intended use.
 * @param operationType the write operation type.
 * @param clientContextOverride optional diagnostics client context override.
 * @return a Mono emitting the fully-prepared request.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    OperationType operationType,
    DiagnosticsClientContext clientContextOverride) {

    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    // Time the payload serialization so it can be surfaced in diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);

    // Opt in to non-idempotent write retries only when the caller enabled them.
    if (operationType.isWriteOperation() &&  options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Resolve the collection and stamp the partition key before the request is usable.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the service request for a transactional batch: wraps the pre-built
 * batch body, records serialization diagnostics, resolves the target
 * collection and applies the batch routing headers.
 *
 * @param requestRetryPolicy per-operation retry policy, may be null.
 * @param documentCollectionLink the target collection link; must be non-empty.
 * @param serverBatchRequest the pre-serialized batch; must be non-null.
 * @param options request options; may carry excluded regions.
 * @param disableAutomaticIdGeneration currently unused here - TODO confirm intended use.
 * @return a Mono emitting the fully-prepared batch request.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink,
    ServerBatchRequest serverBatchRequest,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // Time the (cheap) body wrapping so it can be surfaced in diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    // CLEANUP: excluded regions were previously applied twice (an identical
    // duplicate block appeared again after the diagnostics section); once is enough.
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Resolve the collection, then stamp the batch routing headers.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Applies batch routing and batch control headers to the request.
 * Single-partition-key batches are routed by partition key; partition-key-range
 * batches are routed by range identity.
 *
 * @param request the request to decorate.
 * @param serverBatchRequest the batch being sent.
 * @param collection the resolved collection (needed for the NONE partition key).
 * @return the same request, decorated.
 * @throws UnsupportedOperationException for unknown batch request types.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
    ServerBatchRequest serverBatchRequest,
    DocumentCollection collection) {

    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        // Route by the batch's single partition key.
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;

        if (partitionKey.equals(PartitionKey.NONE)) {
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            // Convert public partitionkey into internal partitionkey.
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        // Route by the explicit partition key range id.
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    // Batch control headers: atomicity and continue-on-error semantics.
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));

    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());

    return request;
}
/**
 * Populates date, authorization, content-type, accept, capability and (when
 * applicable) feed-range filtering headers on the request.
 *
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    // The x-ms-date header must be fresh for key-based auth token computation.
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
        || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();

        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is URL-encoded before being placed in the header.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }

    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }

    this.populateCapabilitiesHeader(request);

    // Default content-type for bodied verbs; PATCH uses the JSON-patch media type.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }

    if (RequestVerb.PATCH.equals(httpMethod) &&
        !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }

    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }

    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

    // Feed-range requests need range headers resolved (async) before auth is finalized.
    if (this.requiresFeedRangeFiltering(request)) {
        return request.getFeedRange()
            .populateFeedRangeFilteringHeaders(
                this.getPartitionKeyRangeCache(),
                request,
                this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
            .flatMap(this::populateAuthorizationHeader);
    }

    return this.populateAuthorizationHeader(request);
}
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    // Advertise the SDK's supported capabilities, unless the caller already set the header.
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    // Feed-range filtering only applies to document/conflict feed reads and queries
    // that actually carry a feed range.
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery =
        operationType == OperationType.ReadFeed
            || operationType == OperationType.Query
            || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    // For AAD auth, resolve a token asynchronously and stamp it on the request headers;
    // all other token types were already handled when the headers were populated.
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    // Header-map overload of the AAD authorization population; no-op for other token types.
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
@Override
// Returns the authorization token type this client was configured with.
public AuthorizationTokenType getAuthorizationTokenType() {
return this.authorizationTokenType;
}
@Override
// Resolves the authorization token for a request. Precedence (as implemented below):
// custom token resolver > credential-based key signature > single resource token >
// per-resource token map. NOTE(review): the tokenType parameter is not consulted by
// any branch here — confirm whether that is intentional.
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Caller-supplied resolver decides the token; properties are exposed read-only.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
// Key-based credential: compute an HMAC signature over the request.
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token was supplied directly as the auth key.
return masterKeyOrResourceToken;
} else {
// Remaining case: a map of resource tokens obtained from a permission feed.
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Account-level reads use the first token from the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    // Map the service-serialized resource type onto the public enum;
    // unknown values fall back to SYSTEM.
    CosmosResourceType resolved =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return resolved != null ? resolved : CosmosResourceType.SYSTEM;
}
// Records the session token from a service response into the session container
// so subsequent session-consistent reads can use it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    // Populate standard POST headers, then dispatch through the resolved store proxy.
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            // On retry attempts, stamp the retry context end time for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    // An upsert is a POST with the IS_UPSERT header set; the response's session token
    // is captured before the response is surfaced.
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            // On retry attempts, stamp the retry context end time for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(serviceResponse -> {
                    this.captureSessionToken(populatedRequest, serviceResponse);
                    return serviceResponse;
                });
        });
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Populate standard PUT headers, then dispatch through the resolved store proxy.
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            // On retry attempts, stamp the retry context end time for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    // Populate standard PATCH headers, then dispatch through the resolved store proxy.
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            // On retry attempts, stamp the retry context end time for diagnostics.
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    // Route the create through the availability-strategy wrapper; non-idempotent write
    // retries are only honored when explicitly enabled on the request options.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (requestOptions, e2eConfig, clientContextOverride) -> createDocumentCore(
            collectionLink,
            document,
            requestOptions,
            disableAutomaticIdGeneration,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Start from the session-token-reset retry policy; when no partition key was supplied,
    // wrap it so partition-key-mismatch errors refresh the collection cache and retry.
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Build the create request, send it with the end-to-end timeout applied, and convert
    // the raw service response into a typed resource response. Synchronous failures while
    // building the request surface as an error Mono.
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestMono = getCreateDocumentRequest(
            requestRetryPolicy, collectionLink, document, options,
            disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
        return requestMono
            .flatMap(serviceRequest -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                serviceRequest,
                endToEndPolicyConfig,
                create(serviceRequest, requestRetryPolicy, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Applies the end-to-end operation timeout (if configured and enabled) to the given
// response Mono. Negative timeouts fail immediately; timeouts are mapped to an
// operation-cancelled exception carrying the request diagnostics.
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
RxDocumentServiceRequest request,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
Mono<T> rxDocumentServiceResponseMono) {
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
// A negative timeout can never be satisfied; fail fast with a descriptive error.
return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
}
// Record the effective policy on the request context so downstream layers can observe it.
request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
return rxDocumentServiceResponseMono
.timeout(endToEndTimeout)
.onErrorMap(throwable -> getCancellationException(request, throwable));
}
// No e2e policy configured: pass the pipeline through unchanged.
return rxDocumentServiceResponseMono;
}
// Translates a reactor timeout into an OperationCancelledException that carries the
// request's diagnostics; any other throwable is passed through unchanged.
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (unwrappedException instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
// Preserve the original timeout's stack trace on the substituted exception.
exception.setStackTrace(throwable.getStackTrace());
if (request.requestContext != null) {
request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
// NOTE(review): when requestContext is null the OperationCancelledException built above
// is discarded and the raw TimeoutException is returned — confirm this is intentional.
}
return throwable;
}
// Builds the OperationCancelledException reported when an end-to-end timeout is negative,
// tagging it with the NEGATIVE_TIMEOUT_PROVIDED sub-status and, when available, the
// request diagnostics.
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
checkArgument(
negativeTimeout.isNegative(),
"This exception should only be used for negative timeouts");
String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
CosmosException exception = new OperationCancelledException(message, null);
BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
if (request != null && request.requestContext != null) {
request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
return exception;
}
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    // Route the upsert through the availability-strategy wrapper; non-idempotent write
    // retries are only honored when explicitly enabled on the request options.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (requestOptions, e2eConfig, clientContextOverride) -> upsertDocumentCore(
            collectionLink, document, requestOptions, disableAutomaticIdGeneration, e2eConfig, clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Start from the session-token-reset retry policy; when no partition key was supplied,
    // wrap it so partition-key-mismatch errors refresh the collection cache and retry.
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Build the upsert request, send it with the end-to-end timeout applied, and convert
    // the raw service response into a typed resource response. Synchronous failures while
    // building the request surface as an error Mono.
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
        Mono<RxDocumentServiceRequest> requestMono = getCreateDocumentRequest(
            retryPolicyInstance, collectionLink, document, options,
            disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
        return requestMono
            .flatMap(serviceRequest -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                serviceRequest,
                endToEndPolicyConfig,
                upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    // Route the replace through the availability-strategy wrapper; non-idempotent write
    // retries are only honored when explicitly enabled on the request options.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (requestOptions, e2eConfig, clientContextOverride) -> replaceDocumentCore(
            documentLink,
            document,
            requestOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Start from the session-token-reset retry policy; when no partition key was supplied,
    // wrap it so partition-key-mismatch errors refresh the collection cache and retry.
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            documentLink,
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Validates the arguments, converts the raw object into a typed Document, and delegates
    // to the Document-typed overload. Synchronous failures surface as an error Mono.
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Pass the exception itself so the stack trace is logged, matching the logging style
        // of createDocumentInternal/upsertDocumentInternal.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    // Route the replace through the availability-strategy wrapper; non-idempotent write
    // retries are only honored when explicitly enabled on the request options.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (requestOptions, e2eConfig, clientContextOverride) -> replaceDocumentCore(
            document,
            requestOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Start from the session-token-reset retry policy; when no partition key was supplied,
    // wrap it so partition-key-mismatch errors refresh the collection cache and retry.
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, options);
    }
    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Validates the document and delegates to the link-based overload using the document's
    // self link. Synchronous failures surface as an error Mono.
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Fixed copy-paste in the message (this path replaces a document, not a database)
        // and pass the exception so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Core replace path: serializes the document, builds the Replace request, resolves the
// collection and partition-key information, then issues the PUT with the end-to-end
// timeout applied.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Serialization is timed so it can be reported in the request's serialization diagnostics.
Instant serializationStartTimeUTC = Instant.now();
if (options != null) {
String trackingId = options.getTrackingId();
if (trackingId != null && !trackingId.isEmpty()) {
// Stamp the tracking id onto the payload before serialization.
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
return requestObs.flatMap(req -> {
// NOTE(review): the lambda ignores `req` and reuses the outer `request` — safe only if
// addPartitionKeyInformation always returns the same instance; confirm.
Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
});
}
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    // A request-level end-to-end latency policy takes precedence over the client-wide default.
    if (options != null) {
        CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
            options.getCosmosEndToEndLatencyPolicyConfig();
        if (requestLevelConfig != null) {
            return requestLevelConfig;
        }
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    // Route the patch through the availability-strategy wrapper; non-idempotent write
    // retries are only honored when explicitly enabled on the request options.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (requestOptions, e2eConfig, clientContextOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            requestOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Patch uses the session-token-reset retry policy directly (no partition-key-mismatch
    // wrapper, unlike create/upsert/replace).
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(
            documentLink,
            cosmosPatchOperations,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
// Core patch path: serializes the patch operations, builds the Patch request, resolves
// collection/partition-key information, then issues the PATCH with the end-to-end
// timeout applied.
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Serialization is timed so it can be reported in the request's serialization diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
// NOTE(review): clientContextOverride is used directly here, whereas sibling operations
// pass it through getEffectiveClientContext(...) — confirm whether this is intentional.
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// Patch sends no document body for partition-key extraction; the key is resolved from options.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
// NOTE(review): the lambda ignores `req` and reuses the outer `request` — safe only if
// addPartitionKeyInformation always returns the same instance; confirm.
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    // Delete without a payload snapshot; delegates to the core path with a null object node.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            null,
            requestOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    // Delete with the caller-supplied object node available for partition-key extraction.
    boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            requestOptions,
            e2eConfig,
            clientContextOverride),
        options,
        nonIdempotentRetriesEnabled);
}
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    // Delete uses the session-token-reset retry policy directly.
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
// Core delete path: builds the Delete request, resolves collection/partition-key
// information, then issues the delete with the end-to-end timeout applied.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
// The object node (when supplied) is used for partition-key extraction; there is no body.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request, null, internalObjectNode, options, collectionObs);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(req -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
// Deletes all documents sharing a logical partition key.
// NOTE(review): the partitionKey parameter is never passed to the internal method —
// presumably the key is carried inside RequestOptions; confirm against callers.
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
requestRetryPolicy);
}
// Core path for partition-wide delete: issues a Delete against the PartitionKey
// resource type after resolving the collection and partition-key information.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
// Public point-read entry point; uses this client as the diagnostics factory.
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
return readDocument(documentLink, options, this);
}
// Point-read with an explicit diagnostics factory; reads never use
// non-idempotent write retries (hence the fixed 'false' below).
private Mono<ResourceResponse<Document>> readDocument(
String documentLink,
RequestOptions options,
DiagnosticsClientContext innerDiagnosticsFactory) {
return wrapPointOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Read,
(opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
options,
false,
innerDiagnosticsFactory
);
}
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    // Point reads use the session-token-reset retry policy directly.
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    // Core point-read path: builds the Read request, resolves collection/partition-key
    // information, then issues the read with the end-to-end timeout applied.
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // Guard against null options: every sibling operation (create/replace/delete) only
        // propagates exclude-regions when options are provided; dereferencing options
        // unconditionally here threw an NPE for callers passing null options.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
@Override
// Reading all documents is implemented as a full-scan query ("SELECT * FROM r")
// over the collection.
public <T> Flux<FeedResponse<T>> readDocuments(
String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
/**
 * Reads a batch of documents identified by (id, partitionKey) pairs.
 *
 * Items are grouped by the partition key range owning their effective partition key.
 * Ranges holding a single item are served via point reads; ranges holding multiple
 * items are served via per-range queries. All page results are merged into one
 * synthetic FeedResponse with aggregated request charge, query metrics and diagnostics.
 *
 * @param itemIdentityList identities to read; for MULTI_HASH containers each identity
 *                         must supply every partition-key component.
 * @param collectionLink   link to the collection.
 * @param state            feed-operation state; receives merged diagnostics.
 * @param klass            item deserialization target type.
 * @return a Mono emitting a single aggregated feed response.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {

    // Scoped factory so diagnostics of all sub-requests can be merged into the caller's context.
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );

    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);

    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(),
                    null,
                    null);

            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Bucket every identity into the partition key range owning its EPK.
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // MULTI_HASH requires a fully-specified (all components) partition key.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                    .getComponents().size() != pkDefinition.getPaths().size()) {
                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }

                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(
                                        itemIdentity.getPartitionKey()),
                                    pkDefinition);

                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs =
                                    partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });

                    // Only multi-item ranges appear in rangeQueryMap; single-item ranges are point reads.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);

                    // DUMMY_SQL_QUERY is a placeholder; the actual per-range specs come from rangeQueryMap.
                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));

                    // Merge all pages and collapse them into one synthetic feed response.
                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;

                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }

                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }

                            CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);

                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                // Success path is recorded with a hard-coded 200 / sub-status 0.
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );

                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }

                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                })
                // Record failures (with status/sub-status and request charge) into the diagnostics context.
                .onErrorMap(throwable -> {
                    if (throwable instanceof CosmosException) {
                        CosmosException cosmosException = (CosmosException)throwable;
                        CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                        if (diagnostics != null) {
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();

                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    cosmosException.getStatusCode(),
                                    cosmosException.getSubStatusCode(),
                                    0,
                                    cosmosException.getRequestCharge(),
                                    diagnostics,
                                    throwable
                                );

                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        diagnostics,
                                        state.getDiagnosticsContextSnapshot());
                            }
                        }

                        return cosmosException;
                    }

                    return throwable;
                });
            }
        );
}
/**
 * Builds the per-partition-key-range query spec map for a readMany call.
 *
 * Ranges that contain exactly one item are intentionally omitted — those are served
 * through point reads instead (see pointReadsForReadMany).
 *
 * @param partitionRangeItemKeyMap item identities bucketed by owning range.
 * @param partitionKeyDefinition   the container's partition key definition.
 * @return map from range to the query spec covering that range's items.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    boolean partitionKeyIsId = "[\"id\"]".equals(partitionKeySelector);
    boolean isMultiHash = partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH);

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges become point reads, not queries
        }

        final SqlQuerySpec spec;
        if (partitionKeyIsId) {
            // id doubles as the partition key: a plain IN-list on id suffices.
            spec = createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector);
        } else if (isMultiHash) {
            spec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            spec = createReadManyQuerySpec(identities, partitionKeySelector);
        }
        rangeQueryMap.put(entry.getKey(), spec);
    }

    return rangeQueryMap;
}
/**
 * Builds a parameterized "SELECT * FROM c WHERE c.id IN ( @param0, ... )" spec.
 *
 * Used when the container's partition key path is /id, so matching by id alone is
 * equivalent to matching by (id, partition key).
 *
 * @param idPartitionKeyPairList identities to read (ids become the IN-list).
 * @param partitionKeySelector   unused here; kept for signature parity with siblings.
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder queryText = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");

    int itemCount = idPartitionKeyPairList.size();
    for (int i = 0; i < itemCount; i++) {
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idPartitionKeyPairList.get(i).getId()));
        if (i > 0) {
            queryText.append(", ");
        }
        queryText.append(idParamName);
    }
    queryText.append(" )");

    return new SqlQuerySpec(queryText.toString(), parameters);
}
/**
 * Builds a parameterized OR-of-conjunctions spec matching each (id, partition key) pair:
 * "SELECT * FROM c WHERE ( (c.id = @p1 AND c[pk] = @p0) OR ... )".
 *
 * Two parameters per identity: @param{2i} holds the partition key value,
 * @param{2i+1} holds the id.
 *
 * @param itemIdentities       identities to read.
 * @param partitionKeySelector bracketed selector built by createPkSelector, e.g. ["pk"].
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder queryText = new StringBuilder("SELECT * FROM c WHERE ( ");

    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());

        String pkParamName = "@param" + (2 * i);
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(pkParamName, pkValue));
        parameters.add(new SqlParameter(idParamName, identity.getId()));

        if (i > 0) {
            queryText.append(" OR ");
        }
        queryText
            .append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }
    queryText.append(" )");

    return new SqlQuerySpec(queryText.toString(), parameters);
}
/**
 * Builds a parameterized OR-of-conjunctions spec for MULTI_HASH (hierarchical) partition
 * keys: each disjunct matches id plus every partition-key component path.
 *
 * NOTE(review): the partition key value is obtained as a single String and split on '=';
 * this assumes the hierarchical key's string form joins components with '=' and that no
 * component contains '=' itself — confirm against PartitionKey's serialization.
 *
 * @param itemIdentities         identities to read.
 * @param partitionKeyDefinition MULTI_HASH definition supplying the component paths.
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {

    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    // Parameter names are numbered globally across all identities and components.
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);

        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkValueString = (String) pkValue;
        // Pairs of (partition key path, parameter name), one per key component.
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;
        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));

        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            // substring(1) drops the leading '/' of the path; note this does not bracket-escape
            // the property name the way createPkSelector does.
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }

        queryStringBuilder.append(" )");
        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Converts the partition key paths into a bracketed selector string,
 * e.g. paths ["/pk"] become ["pk"], and nested paths concatenate: ["a"]["b"].
 *
 * NOTE(review): a double-quote inside a path segment is replaced by a single
 * backslash (not an escaped quote) — preserved as-is here; confirm intent upstream.
 *
 * @param partitionKeyDefinition definition supplying the paths.
 * @return the concatenated bracketed selector.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder selector = new StringBuilder();
    for (String pathPart : partitionKeyDefinition.getPaths()) {
        String withoutSlash = StringUtils.substring(pathPart, 1);      // drop leading '/'
        String escaped = StringUtils.replace(withoutSlash, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
/**
 * Executes the query half of a readMany call: one query per partition key range that
 * holds multiple requested items.
 *
 * @param diagnosticsFactory scoped diagnostics factory shared by all readMany sub-requests.
 * @param parentResourceLink query link of the parent collection.
 * @param sqlQuery           placeholder spec; the real per-range specs come from rangeQueryMap.
 * @param options            query request options.
 * @param klass              result deserialization type.
 * @param resourceTypeEnum   resource type (Document for readMany).
 * @param collection         resolved collection (resource id used for routing).
 * @param rangeQueryMap      per-range query specs; an empty map short-circuits to Flux.empty().
 * @return a Flux of feed pages from all per-range executions.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }

    UUID activityId = randomUuid();
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);

    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Executes the point-read half of a readMany call: one document read per partition key
 * range that holds exactly one requested item. Multi-item ranges are skipped here
 * (they are handled by queryForReadMany).
 *
 * A 404 with sub-status UNKNOWN is treated as "item absent" and converted into an
 * empty feed page (carrying the error's headers and diagnostics) instead of failing
 * the whole readMany; any other error is propagated.
 *
 * @param diagnosticsFactory           scoped diagnostics factory shared with the queries.
 * @param singleItemPartitionRequestMap item identities bucketed by owning range.
 * @param resourceLink                  collection query link; the item id is appended directly,
 *                                      so it is assumed to end with a path separator — TODO confirm.
 * @param queryRequestOptions           options converted to per-request options for each read.
 * @param klass                         item deserialization type.
 * @return a Flux of single-item (or empty) feed pages.
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {

    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    // Pair the success with a null exception so both outcomes share one shape.
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);

                        if (unwrappedThrowable instanceof CosmosException) {

                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;

                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();

                            // Plain 404 (no special sub-status): the item simply doesn't exist.
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }

                        return Mono.error(unwrappedThrowable);
                    });
            }
            // Ranges with multiple items are handled via queries, not point reads.
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {

            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;

            if (cosmosException != null) {
                // Item-absent case: synthesize an empty page and preserve diagnostics.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());

                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());

                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }

            return Mono.just(feedResponse);
        });
}
/**
 * Queries documents using raw SQL text.
 *
 * @param collectionLink link of the collection to query.
 * @param query          the SQL query text.
 * @param state          feed-operation state carrying options and diagnostics hooks.
 * @param classOfT       result deserialization type.
 * @return a Flux of feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {

    // Wrap the raw text and delegate to the SqlQuerySpec overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Creates the IDocumentQueryClient facade used by the query execution pipeline.
 *
 * The anonymous implementation delegates to this client's caches, retry-policy factory
 * and consistency settings via RxDocumentClientImpl.this.
 *
 * NOTE(review): the {@code rxDocumentClientImpl} parameter is never referenced — the
 * implementation uses the enclosing instance instead; consider removing the parameter.
 *
 * @param rxDocumentClientImpl             unused (see note above).
 * @param operationContextAndListenerTuple optional operation context/listener; when present,
 *                                         queries are instrumented with request/response/exception callbacks.
 * @return the query-client facade.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {

    return new IDocumentQueryClient () {

        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }

        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }

        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }

        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }

        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Account-level consistency configured on this client.
            return RxDocumentClientImpl.this.consistencyLevel;
        }

        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Instrumented path: stamp the correlated activity id and fire listener callbacks.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);

                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }

        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }

        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }

        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): intentionally(?) unimplemented — returns null rather than an error
            // Mono; any caller invoking this will NPE on subscription. Confirm it is never used.
            return null;
        }
    };
}
/**
 * Queries documents using a parameterized query spec.
 *
 * @param collectionLink link of the collection to query.
 * @param querySpec      the parameterized query (logged before execution).
 * @param state          feed-operation state carrying options and diagnostics hooks.
 * @param classOfT       result deserialization type.
 * @return a Flux of feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    // Trace the query text/parameters, then run through the shared query pipeline.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Executes a change-feed query against the given collection.
 *
 * @param collection        resolved collection; must not be null.
 * @param changeFeedOptions change-feed request options (start position, mode, ...).
 * @param classOfT          result deserialization type.
 * @return a Flux of change-feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {

    checkNotNull(collection, "Argument 'collection' must not be null.");

    // Change feed runs through its own query implementation keyed by the
    // collection's alt-link and resource id.
    ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Paged-flux entry point for change feed: unwraps the options held by the
 * operation state and delegates to {@link #queryDocumentChangeFeed}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    CosmosChangeFeedRequestOptions effectiveOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, effectiveOptions, classOfT);
}
/**
 * Reads all documents of a single logical partition.
 *
 * Implemented as a partition-scoped scan query: the partition key is resolved to its
 * owning partition key range and the query is pinned to that range id. When the
 * end-to-end latency policy makes more than one region applicable, requests run through
 * a scoped diagnostics factory so cross-region diagnostics can be merged back.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param partitionKey   the logical partition to scan; must not be null.
 * @param state          feed-operation state carrying the query options.
 * @param classOfT       result deserialization type.
 * @return a Flux of feed pages containing the partition's documents.
 * @throws IllegalArgumentException when collectionLink is empty or partitionKey is null.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }

    // Clone so option mutations below (e.g. pinning the PK range id) don't leak to the caller.
    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());

    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();

    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);

    DiagnosticsClientContext effectiveClientContext;
    ScopedDiagnosticsFactory diagnosticsFactory;

    // Fewer than two applicable regions: no speculation, plain client context suffices.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        effectiveClientContext = this;
        diagnosticsFactory = null;
    } else {
        diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
        effectiveClientContext = diagnosticsFactory;
    }

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        effectiveClientContext,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );

    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();

    return collectionObs.flatMap(documentCollectionResourceResponse -> {

        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }

        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));

        // Retry on stale collection cache (e.g. collection recreated with the same name).
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();

                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {

                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Resolve the logical partition to its owning physical range
                    // and pin the query to that range id.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);

                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                    return createQueryInternal(
                        effectiveClientContext,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }

        // Speculation path: fold scoped diagnostics back on every outcome (next/error/cancel).
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}
/**
 * Exposes this client's cache of gateway query plans keyed by query text,
 * shared by the query execution pipeline to avoid repeated query-plan calls.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition key ranges feed of a collection.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param state          feed-operation state carrying options.
 * @return a Flux of partition-key-range feed pages.
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    // PK ranges are a metadata feed, not documents — use the non-document read path.
    String pkRangesPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, pkRangesPath);
}
/**
 * Reads the partition key ranges feed of a collection using raw query options.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param options        query request options.
 * @return a Flux of partition-key-range feed pages.
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    String pkRangesPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, pkRangesPath);
}
/**
 * Validates arguments and builds a service request targeting the stored procedures
 * feed of the given collection.
 *
 * @param collectionLink  parent collection link; must be non-empty.
 * @param storedProcedure the stored procedure payload; must be non-null and valid.
 * @param options         optional request options.
 * @param operationType   the operation (e.g. Create/Upsert) the request represents.
 * @return the populated service request.
 * @throws IllegalArgumentException on missing collectionLink or storedProcedure.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);

    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Validates arguments and builds a service request targeting the user-defined
 * functions feed of the given collection.
 *
 * @param collectionLink parent collection link; must be non-empty.
 * @param udf            the UDF payload; must be non-null and valid.
 * @param options        optional request options.
 * @param operationType  the operation the request represents.
 * @return the populated service request.
 * @throws IllegalArgumentException on missing collectionLink or udf.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);

    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure under the given collection, driving the call through
 * the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation for creating a stored procedure under a collection.
 *
 * @param collectionLink      link of the parent collection.
 * @param storedProcedure     the stored procedure to create; must not be null.
 * @param options             optional request options.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @return a Mono emitting the resource response; validation failures are surfaced
 *         as Mono.error (IllegalArgumentException), matching the sibling *Internal methods.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Fix: validate before logging. The previous code dereferenced storedProcedure.getId()
        // in the log statement first, so a null storedProcedure surfaced as a NullPointerException
        // instead of the IllegalArgumentException("storedProcedure") that getStoredProcedureRequest
        // (and replaceStoredProcedureInternal) raise for the same condition.
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }

        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing stored procedure (addressed by its self link), driving the
 * call through the session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation for replacing a stored procedure addressed by its self link.
 *
 * @param storedProcedure     the replacement payload; must not be null.
 * @param options             optional request options.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @return a Mono emitting the resource response; validation failures surface as Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

        RxDocumentClientImpl.validateResource(storedProcedure);

        String sprocPath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, sprocPath, storedProcedure, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure by link, driving the call through the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation for deleting a stored procedure by link.
 *
 * @param storedProcedureLink link of the stored procedure; must be non-empty.
 * @param options             optional request options.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @return a Mono emitting the resource response; validation failures surface as Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String sprocPath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, sprocPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure by link, driving the call through the
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation for reading a stored procedure by link.
 *
 * @param storedProcedureLink link of the stored procedure; must be non-empty.
 * @param options             optional request options.
 * @param retryPolicyInstance retry policy to notify before sending; may be null.
 * @return a Mono emitting the resource response; validation failures surface as Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String sprocPath = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, sprocPath, headers, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }

        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored procedures feed of a collection.
 *
 * @param collectionLink link to the collection; must be non-empty.
 * @param state          feed-operation state carrying options.
 * @return a Flux of stored-procedure feed pages.
 * @throws IllegalArgumentException when {@code collectionLink} is null or empty.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    String sprocsPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, sprocsPath);
}
/**
 * Queries stored procedures using raw SQL text; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
/**
 * Queries stored procedures using a parameterized query spec via the shared
 * query pipeline.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with the given parameters, driving the call through
 * the session-token-reset retry policy.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch against a collection, driving the call through
 * the session-token-reset retry policy.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(
            collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Builds and issues the ExecuteJavaScript request for a stored procedure.
 *
 * @param storedProcedureLink link of the stored procedure to execute.
 * @param options             request options (may be null).
 * @param procedureParams     parameters serialized into the request body; null/empty yields an empty body.
 * @param retryPolicy         retry policy notified before the request is sent (may be null).
 * @return a Mono of the stored-procedure response; construction failures surface as Mono.error.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        // The script result is requested as JSON.
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);
        if (options != null) {
            // Honor per-request region exclusions.
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda ignores the emitted 'req' and reuses the captured 'request';
        // this assumes addPartitionKeyInformation mutates and re-emits the same instance — confirm.
        return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                // Record the returned session token before mapping the response.
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds the batch document request and maps the raw service response into a
 * {@code CosmosBatchResponse}.
 *
 * @param collectionLink               link of the target collection.
 * @param serverBatchRequest           the prepared batch of operations.
 * @param options                      request options (may be null).
 * @param requestRetryPolicy           retry policy threaded through request creation and execution.
 * @param disableAutomaticIdGeneration whether automatic id generation is disabled for the batch.
 * @return a Mono of the parsed batch response; construction failures surface as Mono.error.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
        return responseObservable
            // 'true' — NOTE(review): presumably instructs the parser to ensure/validate
            // per-operation results; confirm against BatchResponseParser.
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger in a collection, sharing one session-token-reset retry
 * policy between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a trigger.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting the
 * triggers feed of the given collection.
 *
 * @throws IllegalArgumentException if {@code collectionLink} is empty or {@code trigger} is null.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, resourcePath, trigger, headers, options);
}
/**
 * Replaces an existing trigger, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a trigger addressed by its self-link.
 *
 * @param trigger             the trigger to replace; must be non-null.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the replace response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        // Replace targets the resource's own self-link (no extra path segment).
        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a trigger, sharing one session-token-reset retry policy between the
 * operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a trigger addressed by its link.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String resourcePath = Utils.joinPath(triggerLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a trigger, sharing one session-token-reset retry policy between the
 * operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a trigger addressed by its link.
 *
 * @param triggerLink         link of the trigger to read; must be non-empty.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the read response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the trigger feed of a collection.
 */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
/**
 * Queries triggers with a raw query string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, state);
}
/**
 * Queries triggers of a collection with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
/**
 * Creates a user-defined function in a collection, sharing one session-token-reset
 * retry policy between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user-defined function.
 *
 * @param collectionLink      link of the parent collection.
 * @param udf                 the user-defined function to create.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the create response; construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        // Request construction (validation, path, headers) is shared with the other UDF operations.
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces an existing user-defined function, sharing one session-token-reset
 * retry policy between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user-defined function addressed by
 * its self-link.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
                                                                                       RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user-defined function, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a user-defined function addressed by its link.
 *
 * @param udfLink             link of the UDF to delete; must be non-empty.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the delete response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user-defined function, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user-defined function addressed by its link.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
                                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String resourcePath = Utils.joinPath(udfLink, null);
        Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user-defined-function feed of a collection.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
/**
 * Queries user-defined functions with a raw query string; delegates to the
 * {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, state);
}
/**
 * Queries user-defined functions of a collection with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
/**
 * Reads a conflict resource, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a conflict, resolving partition-key
 * information asynchronously before sending.
 *
 * @param conflictLink        link of the conflict to read; must be non-empty.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the read response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // NOTE(review): the emitted 'req' is ignored and the captured 'request' is used;
            // this assumes addPartitionKeyInformation mutates and re-emits the same instance — confirm.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the conflict feed of a collection.
 */
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
/**
 * Queries conflicts with a raw query string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   QueryFeedOperationState state) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, state);
}
/**
 * Queries conflicts of a collection with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   QueryFeedOperationState state) {
    return createQuery(
        collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
/**
 * Deletes a conflict resource, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Delete request for a conflict, resolving partition-key
 * information asynchronously before sending.
 *
 * @param conflictLink        link of the conflict to delete; must be non-empty.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the delete response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // NOTE(review): the emitted 'req' is ignored and the captured 'request' is used;
            // this assumes addPartitionKeyInformation mutates and re-emits the same instance — confirm.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a user under a database, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a user resource.
 *
 * @param databaseLink             link of the parent database.
 * @param user                     the user to create.
 * @param options                  request options (may be null).
 * @param documentClientRetryPolicy retry policy passed to the create pipeline.
 * @return a Mono of the create response; construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        // NOTE(review): unlike upsertUserInternal/replaceUserInternal, this path never calls
        // retryPolicy.onBeforeSendRequest(request) — confirm whether that is intentional.
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Upserts a user under a database, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Upsert request for a user resource.
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting the
 * users feed of the given database.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty or {@code user} is null.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, resourcePath, user, headers, options);
}
/**
 * Replaces an existing user, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a user addressed by its self-link.
 *
 * @param user                the user to replace; must be non-null.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the replace response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        // Replace targets the resource's own self-link (no extra path segment).
        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a user resource, sharing one session-token-reset retry policy between
 * the operation callback and the retry wrapper.
 *
 * Fix: added the missing {@code @Override} annotation — every sibling interface
 * implementation here ({@code createUser}, {@code replaceUser}, {@code readUser})
 * carries it, and its absence disables the compiler's signature check.
 *
 * @param userLink link of the user to delete.
 * @param options  request options (may be null).
 * @return a Mono of the delete response.
 */
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Builds and issues the Delete request for a user addressed by its link.
 *
 * @param userLink            link of the user to delete; must be non-empty.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the delete response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a user resource, sharing one session-token-reset retry policy between
 * the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a user addressed by its link.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);
        String resourcePath = Utils.joinPath(userLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the user feed of a database.
 */
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedPath);
}
/**
 * Queries users with a raw query string; delegates to the {@link SqlQuerySpec} overload.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, state);
}
/**
 * Queries users of a database with a parameterized query spec.
 */
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           QueryFeedOperationState state) {
    return createQuery(
        databaseLink, querySpec, state, User.class, ResourceType.User);
}
/**
 * Reads a client encryption key, sharing one session-token-reset retry policy
 * between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Read request for a client encryption key addressed by its link.
 *
 * @param clientEncryptionKeyLink link of the key to read; must be non-empty.
 * @param options                 request options (may be null).
 * @param retryPolicyInstance     retry policy notified before the request is sent (may be null).
 * @return a Mono of the read response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Creates a client encryption key under a database, sharing one
 * session-token-reset retry policy between the operation callback and the
 * retry wrapper.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Create request for a client encryption key.
 *
 * @param databaseLink              link of the parent database.
 * @param clientEncryptionKey       the key to create.
 * @param options                   request options (may be null).
 * @param documentClientRetryPolicy retry policy passed to the create pipeline.
 * @return a Mono of the create response; construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        // NOTE(review): unlike the read/replace paths, no onBeforeSendRequest call here — confirm intentional.
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Validates inputs and builds an {@link RxDocumentServiceRequest} targeting the
 * client-encryption-key feed of the given database.
 *
 * @throws IllegalArgumentException if {@code databaseLink} is empty or
 *                                  {@code clientEncryptionKey} is null.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);
    String resourcePath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers, options);
}
/**
 * Replaces a client encryption key addressed by its name-based link, sharing one
 * session-token-reset retry policy between the operation callback and the retry wrapper.
 */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the Replace request for a client encryption key addressed by
 * its name-based link.
 *
 * @param clientEncryptionKey the key to replace; must be non-null.
 * @param nameBasedLink       name-based link used as the request path.
 * @param options             request options (may be null).
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null).
 * @return a Mono of the replace response; validation/construction failures surface as Mono.error.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        // Unlike other replace paths, the caller supplies a name-based link rather
        // than the resource self-link.
        String path = Utils.joinPath(nameBasedLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads (read-feed, no query) all client encryption keys under the given database link.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    String resourceLink = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, resourceLink);
}
// Runs a SQL query over the client encryption keys of the given database via the
// shared query pipeline. Note: unlike readClientEncryptionKeys, databaseLink is not
// validated here; createQuery handles link resolution.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
// Creates a permission under the given user link.
// Fix: use the SAME retry policy instance for both the operation callback and the
// outer retry wrapper. Previously a second, independent policy instance was created
// for the wrapper, so the policy driving retries was not the one that observed the
// request — all sibling methods (upsertPermission, replacePermission, deletePermission,
// readPermission) share a single instance.
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}
// Core implementation for createPermission: builds a Create request and maps the
// response into a typed ResourceResponse.
// Consistency fix: invoke onBeforeSendRequest before dispatch, exactly like
// upsertPermissionInternal / deletePermissionInternal / readPermissionInternal do,
// so the retry policy can capture the request it will be retrying.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        if (documentClientRetryPolicy != null) {
            documentClientRetryPolicy.onBeforeSendRequest(request);
        }
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Upserts a permission under the given user link, retrying with a
// session-token-reset retry policy shared by wrapper and callback.
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Core implementation for upsertPermission: builds an Upsert request, lets the retry
// policy observe it, dispatches it and maps the response to a typed ResourceResponse.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Builds a service request targeting the permissions feed of a user.
// Validation order (userLink first, then permission) is part of the observable
// behavior: the argument name doubles as the exception message.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);
    String permissionsPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, permissionsPath, permission, headers, options);
}
// Replaces an existing permission (addressed via its self link), retrying with a
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
// Core implementation for replacePermission: validates the resource, builds a Replace
// request against the permission's self link and maps the response to a typed
// ResourceResponse. Synchronous failures are surfaced as Mono.error.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        String path = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Deletes the permission addressed by permissionLink, retrying with a
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation for deletePermission: builds a Delete request for the given
// link, lets the retry policy observe it, and maps the response to a typed
// ResourceResponse.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads the permission addressed by permissionLink, retrying with a
// session-token-reset retry policy.
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Core implementation for readPermission: builds a Read request for the given link,
// lets the retry policy observe it, and maps the response to a typed ResourceResponse.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String path = Utils.joinPath(permissionLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads (read-feed, no query) all permissions under the given user link.
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    String permissionsLink = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, permissionsLink);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec and delegates.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       QueryFeedOperationState state) {
    return queryPermissions(userLink, new SqlQuerySpec(query), state);
}
// Runs a SQL query over the permissions of the given user via the shared query pipeline.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       QueryFeedOperationState state) {
    return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
// Replaces an offer (throughput resource), retrying with a session-token-reset
// retry policy.
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, retryPolicy), retryPolicy);
}
// Core implementation for replaceOffer: validates the offer, builds a Replace request
// against its self link (offers take no request options/headers) and maps the
// response to a typed ResourceResponse.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String path = Utils.joinPath(offer.getSelfLink(), null);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, path, offer, null, null);
        return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads the offer addressed by offerLink, retrying with a session-token-reset
// retry policy.
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicy), retryPolicy);
}
// Core implementation for readOffer: builds a header-less Read request for the given
// link, lets the retry policy observe it, and maps the response to a typed
// ResourceResponse.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String path = Utils.joinPath(offerLink, null);
        // Cast disambiguates the create(...) overload taking a header map.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Reads (read-feed) all offers in the account.
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
        Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
// Adapter overload: extracts the query options from the operation state and delegates.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}
// Wraps the paginated read-feed for a non-document resource in a
// session-token-reset retry policy.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    DocumentClientRetryPolicy feedRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, feedRetryPolicy),
        feedRetryPolicy);
}
// Drives a paginated ReadFeed over a non-document resource (offers, permissions,
// encryption keys, ...). Each page request carries the continuation token of the
// previous page plus the page size; the Paginator turns the request/execute pair
// into a Flux of typed FeedResponse pages.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink,
    DocumentClientRetryPolicy retryPolicy) {
    final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
    Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
    // -1 lets the service pick its default page size.
    int maxPageSize = maxItemCount != null ? maxItemCount : -1;
    // Documents go through the query pipeline instead of this helper.
    assert(resourceType != ResourceType.Document);
    BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
        Map<String, String> requestHeaders = new HashMap<>();
        if (continuationToken != null) {
            requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
        }
        requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
        // The retry policy must observe every page request, not just the first one.
        retryPolicy.onBeforeSendRequest(request);
        return request;
    };
    Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
        request -> readFeed(request)
            .map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(nonNullOptions, klass),
                klass));
    return Paginator
        .getPaginatedQueryResultAsObservable(
            nonNullOptions,
            createRequestFunc,
            executeFunc,
            maxPageSize);
}
// Convenience overload: wraps the raw query string in a SqlQuerySpec and delegates.
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    return queryOffers(new SqlQuerySpec(query), state);
}
// Runs a SQL query over the account's offers; offers are account-scoped, hence the
// null parent link.
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
// Reads the database account metadata, retrying with a session-token-reset
// retry policy.
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy), retryPolicy);
}
// Core implementation for getDatabaseAccount: issues a header-less Read against the
// account root ("") and converts the wire response to a DatabaseAccount.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // Cast disambiguates the create(...) overload taking a header map.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Exposes the session container (declared as Object to avoid leaking the internal type).
public Object getSession() {
    return this.sessionContainer;
}
// Replaces the session container; the argument must be a SessionContainer or a
// ClassCastException is thrown.
public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}
// Accessor for the client-side collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}
// Accessor for the partition-key-range metadata cache.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}
// Accessor for the global (multi-region) endpoint manager.
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}
// Builds a fresh AddressSelector on every call (not cached) from the address
// resolver and the configured transport protocol.
@Override
public AddressSelector getAddressSelector() {
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the database account metadata from a SPECIFIC regional endpoint (bypassing
// endpoint selection) and, as a side effect, refreshes the cached
// useMultipleWriteLocations flag from the returned account. Deferred so the request
// is built per subscription.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                // Pin the request to the requested endpoint instead of the
                // endpoint manager's choice.
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                            && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}
/**
 * Selects the store model (direct TCP vs gateway) for a request. Certain requests
 * must be routed through gateway even when the client connectivity mode is direct:
 * metadata resources, master-resource writes, and cross-partition queries that
 * cannot yet be scoped to a partition key range.
 *
 * @param request the request to route
 * @return RxStoreModel the proxy (gateway or direct store model) to dispatch through
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request override always wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // Resource types that are always gateway-served (metadata / control plane),
    // except script execution which goes direct.
    if (resourceType == ResourceType.Offer ||
        resourceType == ResourceType.ClientEncryptionKey ||
        resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
        resourceType == ResourceType.PartitionKeyRange ||
        resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        return this.gatewayProxy;
    }
    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        // Creating master resources goes through gateway; document writes go direct.
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection ||
            resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database ||
            resourceType == ResourceType.User ||
            resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Queries/feeds over collection children without a resolved partition key
        // range or partition key header must fan out via gateway.
        if ((operationType == OperationType.Query ||
            operationType == OperationType.SqlQuery ||
            operationType == OperationType.ReadFeed) &&
            Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null &&
                request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}
// Idempotently shuts the client down: the closed flag guarantees the teardown runs
// at most once; a second call only logs a warning. Individual components are closed
// quietly so one failing close does not prevent the rest from being released.
@Override
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // Null check makes the close safe for a partially initialized client:
        // ThroughputControlStore#close has no built-in null safety, so closing a
        // client whose throughput-control setup did not finish would otherwise NPE
        // and skip the remaining shutdown logging.
        if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
// Accessor for the item deserializer used to materialize documents.
@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}
// Registers a throughput control group. On first use (compareAndSet guards the
// one-time initialization) the ThroughputControlStore is created and wired into
// either the direct store model or the gateway proxy depending on connection mode.
// Synchronized so concurrent group registrations do not race the initialization.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Delegates proactive connection warm-up and cache initialization to the store model.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
// Returns the account-level default consistency as reported by the gateway
// configuration reader.
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider. In direct mode the injector is wired into both
 * the store model and the address resolver; the gateway proxy is always wired so
 * gateway-routed requests can be injected in either mode.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Forwards the "warm-up completed" telemetry signal to the store model.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
// Forwards the "warm-up started" telemetry signal to the store model.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
// Accessor for the credential string (master key or resource token) this client
// was built with.
@Override
public String getMasterKeyOrResourceToken() {
    return this.masterKeyOrResourceToken;
}
// Builds a parameterized scan over a single logical partition:
//   SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue
// The partition key value is bound as a parameter rather than inlined.
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE" + " c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
// Resolves the feed ranges (one per physical partition) of a container. Wrapped in
// an InvalidPartitionExceptionRetryPolicy so a stale collection cache triggers a
// refresh-and-retry instead of failing.
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink),
        invalidPartitionExceptionRetryPolicy);
}
// Core implementation for getFeedRanges: resolves the collection, fetches ALL
// overlapping partition key ranges (full range), and converts each to an EPK-based
// FeedRange.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
    logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
        request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        // forceRefresh=true: always fetch the current range topology for this call.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}
// Converts a partition-key-range list into feed ranges. A null list means the
// cached collection is stale: flag a name-cache refresh on the request and throw
// so the surrounding retry policy can retry.
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> pkRanges = partitionKeyRangeListValueHolder.v;
    if (pkRanges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>();
    for (PartitionKeyRange pkRange : pkRanges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a partition key range's EPK range in a FeedRange implementation.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random
 * number generator ({@link ThreadLocalRandom}) — do not use the result for
 * security-sensitive identifiers.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
}
// Stamps the RFC 4122 version nibble (4 => "randomly generated") and the IETF
// variant bits (10xx) onto the caller-supplied random bits, mirroring the bit
// layout produced by UUID.randomUUID().
static UUID randomUuid(long msb, long lsb) {
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: uses this client as the diagnostics factory and delegates
// to the full availability-strategy wrapper.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {
    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}
// Implements threshold-based cross-region hedging for document point operations.
// When an end-to-end latency policy with a ThresholdBasedAvailabilityStrategy is in
// effect and more than one region applies, the operation is issued against the
// primary region immediately and against each additional region after a staggered
// delay; the first region to produce a non-transient result wins
// (Mono.firstWithValue) and the others are cancelled. Diagnostics from all attempts
// are merged back into the request options on every terminal path.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
    checkNotNull(operationType, "Argument 'operationType' must not be null.");
    checkNotNull(callback, "Argument 'callback' must not be null.");
    final RequestOptions nonNullRequestOptions =
        initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
    checkArgument(
        resourceType == ResourceType.Document,
        "This method can only be used for document point operations.");
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        idempotentWriteRetriesEnabled,
        nonNullRequestOptions);
    // Fewer than two applicable regions: no hedging, run the plain operation.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
    }
    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
            if (monoList.isEmpty()) {
                // First (primary) attempt: may itself retry across all regions, so
                // ANY CosmosException counts as a candidate result here.
                Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: pin it to this region by excluding all other
                // applicable regions; only NON-transient errors count as results
                // (transient ones keep the race open for other regions).
                clonedOptions.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        nonNullRequestOptions.getExcludeRegions(),
                        orderedApplicableRegionsForSpeculation,
                        region)
                );
                Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                    callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                        .map(NonTransientPointOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientPointOperationResult(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));
                // Stagger hedged attempts: threshold + (attemptIndex - 1) * step.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));
                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            diagnosticsFactory.merge(nonNullRequestOptions);
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }
            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);
            // firstWithValue signals NoSuchElementException when no source emitted;
            // dig out the first real CosmosException from the composite cause.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());
                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);
                    if (innerException instanceof CosmosException) {
                        CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                        diagnosticsFactory.merge(nonNullRequestOptions);
                        return cosmosException;
                    } else if (innerException instanceof NoSuchElementException) {
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }
                    index++;
                }
            }
            diagnosticsFactory.merge(nonNullRequestOptions);
            return exception;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the unwrapped (reactive-composite-free) throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// True when the unwrapped throwable is a CosmosException whose status/sub-status
// classify it as non-transient for hedging purposes.
private static boolean isNonTransientCosmosException(Throwable t) {
    final Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
// Builds the excluded-region list that pins a hedged attempt to currentRegion:
// the caller's original exclusions plus every other applicable region.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    List<String> merged = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();
    for (String candidate : applicableRegions) {
        if (!candidate.equals(currentRegion)) {
            merged.add(candidate);
        }
    }
    return merged;
}
// Classifies a status/sub-status pair as a final (non-transient) outcome for the
// hedging race: successes, client-side operation timeouts, deterministic client
// errors, and genuine 404/0 not-found results end the race; everything else is
// treated as transient so other regions may still win.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Any non-error status (< 400) is a success.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // 408 with client-operation-timeout sub-status is final for this attempt.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT &&
        subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
        return true;
    }
    // Deterministic client errors would fail identically in every region.
    if (statusCode == HttpConstants.StatusCodes.BADREQUEST
        || statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
        || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
        || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
        || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) {
        return true;
    }
    // 404 with sub-status 0 is a genuine "document not found" (not a routing miss).
    if (statusCode == HttpConstants.StatusCodes.NOTFOUND
        && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
        return true;
    }
    return false;
}
// Returns the supplied diagnostics context override, falling back to this client
// when none is given.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any; read-only
 * operations use the read endpoints, write operations the write endpoints, and
 * anything else yields the shared empty list.
 * @param operationType - the operation type used to pick read vs write endpoints
 * @param excludedRegions - regions to filter out of the candidate list
 * @return the applicable endpoints ordered by preference list if any
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    } else if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }
    return EMPTY_ENDPOINT_LIST;
}
/**
 * Strips null entries from the endpoint list in place and returns the same list
 * (or the shared empty list when the input is null).
 *
 * @param orderedEffectiveEndpointsList the list to clean; may be null
 * @return the same list with nulls removed, or {@code EMPTY_ENDPOINT_LIST} for null input
 */
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }

    // Single-pass removal; the previous index-based remove(i) loop was O(n^2)
    // because each removal shifts the remaining elements.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload: pulls the excluded regions out of the request options and
// delegates to the list-based overload.
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    RequestOptions options) {

    return getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        isIdempotentWriteRetriesEnabled,
        options.getExcludeRegions());
}
/**
 * Determines the ordered list of regions eligible for speculative (hedged) execution.
 * Returns the shared empty list whenever hedging does not apply: the end-to-end policy
 * is absent/disabled, the resource is not a document, writes are not idempotent-retry
 * enabled, multi-write is unavailable for a write, or the availability strategy is not
 * threshold-based.
 *
 * @param endToEndPolicyConfig the effective end-to-end latency policy (may be null)
 * @param resourceType the resource type of the operation
 * @param operationType the operation type of the operation
 * @param isIdempotentWriteRetriesEnabled whether write retries are safe for this operation
 * @param excludedRegions regions the caller excluded (may be null)
 * @return region names eligible for speculation, in endpoint-preference order
 */
private List<String> getApplicableRegionsForSpeculation(
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    ResourceType resourceType,
    OperationType operationType,
    boolean isIdempotentWriteRetriesEnabled,
    List<String> excludedRegions) {

    // Short-circuit order matches the original guard sequence (null-safe).
    boolean speculationNotApplicable =
        endToEndPolicyConfig == null
            || !endToEndPolicyConfig.isEnabled()
            || resourceType != ResourceType.Document
            || (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled)
            || (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations())
            || !(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy);

    if (speculationNotApplicable) {
        return EMPTY_REGION_LIST;
    }

    List<URI> applicableEndpoints = getApplicableEndPoints(operationType, excludedRegions);

    // Normalize exclusions to lower case for case-insensitive matching.
    HashSet<String> excludedRegionsLowerCase = new HashSet<>();
    if (excludedRegions != null) {
        for (String excludedRegion : excludedRegions) {
            excludedRegionsLowerCase.add(excludedRegion.toLowerCase(Locale.ROOT));
        }
    }

    List<String> speculationRegions = new ArrayList<>();
    for (URI endpoint : applicableEndpoints) {
        String regionName = this.globalEndpointManager.getRegionName(endpoint, operationType);
        if (!excludedRegionsLowerCase.contains(regionName.toLowerCase(Locale.ROOT))) {
            speculationRegions.add(regionName);
        }
    }

    return speculationRegions;
}
/**
 * Executes a feed operation with the threshold-based availability strategy ("hedging"):
 * the operation starts immediately in the first applicable region and, after staggered
 * delays, in each remaining applicable region; the first non-transient result wins.
 *
 * @param resourceType must be {@link ResourceType#Document} (asserted below)
 * @param retryPolicyFactory factory producing the per-attempt retry policy
 * @param req the service request; cloned for every regional attempt
 * @param feedOperation the actual operation executed per attempt
 * @param <T> the response type
 * @return a Mono emitting the winning result, or the mapped terminal error
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
    final ResourceType resourceType,
    final OperationType operationType,
    final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
    final RxDocumentServiceRequest req,
    final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
    checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
    checkNotNull(req, "Argument 'req' must not be null.");
    assert(resourceType == ResourceType.Document);

    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = req
        .requestContext
        .getEndToEndOperationLatencyPolicyConfig();

    List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
    List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        resourceType,
        operationType,
        false,
        initialExcludedRegions
    );

    // Fewer than two applicable regions -> nothing to hedge; run the plain operation.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        return feedOperation.apply(retryPolicyFactory, req);
    }

    ThresholdBasedAvailabilityStrategy availabilityStrategy =
        (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
    List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();

    orderedApplicableRegionsForSpeculation
        .forEach(region -> {
            RxDocumentServiceRequest clonedRequest = req.clone();

            if (monoList.isEmpty()) {
                // First attempt: not pinned to one region, and ANY CosmosException
                // is treated as a final result for the race (isCosmosException).
                Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                if (logger.isDebugEnabled()) {
                    monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                        "STARTING to process {} operation in region '{}'",
                        operationType,
                        region)));
                } else {
                    monoList.add(initialMonoAcrossAllRegions);
                }
            } else {
                // Hedged attempt: exclude every other applicable region so this attempt
                // is pinned to 'region'; only non-transient errors complete it as a result.
                clonedRequest.requestContext.setExcludeRegions(
                    getEffectiveExcludedRegionsForHedging(
                        initialExcludedRegions,
                        orderedApplicableRegionsForSpeculation,
                        region)
                );

                Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                    feedOperation.apply(retryPolicyFactory, clonedRequest)
                        .map(NonTransientFeedOperationResult::new)
                        .onErrorResume(
                            RxDocumentClientImpl::isNonTransientCosmosException,
                            t -> Mono.just(
                                new NonTransientFeedOperationResult<>(
                                    Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                // Stagger subscriptions: threshold + (attemptIndex - 1) * thresholdStep.
                Duration delayForCrossRegionalRetry = (availabilityStrategy)
                    .getThreshold()
                    .plus((availabilityStrategy)
                        .getThresholdStep()
                        .multipliedBy(monoList.size() - 1));

                if (logger.isDebugEnabled()) {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                            .delaySubscription(delayForCrossRegionalRetry));
                } else {
                    monoList.add(
                        regionalCrossRegionRetryMono
                            .delaySubscription(delayForCrossRegionalRetry));
                }
            }
        });

    // First mono to emit a value (success or captured non-transient error) wins the race.
    return Mono
        .firstWithValue(monoList)
        .flatMap(nonTransientResult -> {
            if (nonTransientResult.isError()) {
                return Mono.error(nonTransientResult.exception);
            }

            return Mono.just(nonTransientResult.response);
        })
        .onErrorMap(throwable -> {
            Throwable exception = Exceptions.unwrap(throwable);

            // When no source emits a value, firstWithValue signals NoSuchElementException
            // whose cause aggregates the per-region failures - surface the first
            // CosmosException among them.
            if (exception instanceof NoSuchElementException) {
                List<Throwable> innerThrowables = Exceptions
                    .unwrapMultiple(exception.getCause());

                int index = 0;
                for (Throwable innerThrowable : innerThrowables) {
                    Throwable innerException = Exceptions.unwrap(innerThrowable);

                    if (innerException instanceof CosmosException) {
                        return Utils.as(innerException, CosmosException.class);
                    } else if (innerException instanceof NoSuchElementException) {
                        // A cancelled (losing) attempt completes empty - expected, trace only.
                        logger.trace(
                            "Operation in {} completed with empty result because it was cancelled.",
                            orderedApplicableRegionsForSpeculation.get(index));
                    } else if (logger.isWarnEnabled()) {
                        String message = "Unexpected Non-CosmosException when processing operation in '"
                            + orderedApplicableRegionsForSpeculation.get(index)
                            + "'.";
                        logger.warn(
                            message,
                            innerException
                        );
                    }

                    index++;
                }
            }

            return exception;
        });
}
/**
 * A point operation (create/read/replace/delete/patch) on a document, parameterized by
 * request options, the effective end-to-end latency policy and an optional diagnostics
 * client-context override.
 */
@FunctionalInterface
private interface DocumentPointOperation {
    Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Holder for the outcome of a point operation during hedging: exactly one of
 * {@code response} (success) or {@code exception} (non-transient failure) is set.
 */
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    /** Wraps a non-transient failure. */
    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Wraps a successful response. */
    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** True when this result carries an exception rather than a response. */
    public boolean isError() {
        return exception != null;
    }

    public CosmosException getException() {
        return exception;
    }

    public ResourceResponse<Document> getResponse() {
        return response;
    }
}
/**
 * Holder for the outcome of a feed operation during hedging: exactly one of
 * {@code response} (success) or {@code exception} (non-transient failure) is set.
 * Fields are read directly by the hedging pipeline, so their names are part of
 * the internal contract.
 *
 * @param <T> the feed response type
 */
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    /** Wraps a non-transient failure. */
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Wraps a successful response. */
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** True when this result carries an exception rather than a response. */
    public boolean isError() {
        return exception != null;
    }

    public CosmosException getException() {
        return exception;
    }

    public T getResponse() {
        return response;
    }
}
/**
 * A DiagnosticsClientContext decorator that records every CosmosDiagnostics it creates
 * so they can later be merged into a single CosmosDiagnosticsContext ("ctx") for the
 * scope of one logical operation. Merging happens at most once until {@link #reset()}.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Guards against merging twice for the same scope.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    // All diagnostics created through this factory within the current scope.
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Delegate creation but remember the instance for the later merge.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }

    /**
     * Merges using the diagnostics context snapshot from the request options when
     * available, otherwise falls back to context discovery in {@link #merge(CosmosDiagnosticsContext)}.
     */
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;

        if (requestOptions != null) {
            // Single fetch of the snapshot (previously retrieved twice redundantly);
            // a null snapshot simply leaves knownCtx null.
            knownCtx = requestOptions.getDiagnosticsContextSnapshot();
        }

        merge(knownCtx);
    }

    /**
     * Attaches all recorded, not-yet-attributed diagnostics to the given context (or to
     * the first context found among the recorded diagnostics). No-op after the first call.
     */
    public void merge(CosmosDiagnosticsContext knownCtx) {
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }

        CosmosDiagnosticsContext ctx = null;

        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            // Discover a ctx from any diagnostics already bound to one.
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }

        if (ctx == null) {
            return;
        }

        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {

                    // Mark feed diagnostics as captured so the paged flux does not drop them.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }

                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    /** Clears recorded diagnostics and re-arms merging for a new scope. */
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// Shared empty results - avoid per-call allocations on the hedging hot path.
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();

// Bridge accessors into otherwise-inaccessible members of public azure-cosmos types.
private final static
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
    ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
    ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
    ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
    ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
    ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
    ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();

// JVM-wide bookkeeping shared by all client instances (client id generation,
// active-client counter and per-endpoint client counts used in diagnostics).
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);

// Effective-partition-key range spanning all physical partitions.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
    PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
    PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
    "ParallelDocumentQueryExecutioncontext, but not used";

private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);

// Authentication state - exactly one of masterKey/resource-token/AzureKeyCredential/
// TokenCredential is effective; see the main constructor for the selection logic.
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;

// Session consistency, metadata caches and transport plumbing; several of these are
// assigned after construction during client initialization.
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Populated only when the client is constructed from a permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor without a TokenCredential: delegates to the permission-feed
 * constructor (passing {@code null} for the token credential) and then installs the
 * optional authorization-token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            CosmosClientTelemetryConfig clientTelemetryConfig,
                            String clientCorrelationId,
                            CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                            SessionRetryOptions sessionRetryOptions,
                            CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        null,
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    // Set after delegation because the chained constructor does not accept a resolver.
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor accepting both an AzureKeyCredential and a TokenCredential:
 * delegates to the permission-feed constructor and then installs the optional
 * authorization-token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
                            String masterKeyOrResourceToken,
                            List<Permission> permissionFeed,
                            ConnectionPolicy connectionPolicy,
                            ConsistencyLevel consistencyLevel,
                            Configs configs,
                            CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
                            AzureKeyCredential credential,
                            TokenCredential tokenCredential,
                            boolean sessionCapturingOverride,
                            boolean connectionSharingAcrossClientsEnabled,
                            boolean contentResponseOnWriteEnabled,
                            CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                            ApiType apiType,
                            CosmosClientTelemetryConfig clientTelemetryConfig,
                            String clientCorrelationId,
                            CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                            SessionRetryOptions sessionRetryOptions,
                            CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        permissionFeed,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverride,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    // Set after delegation because the chained constructor does not accept a resolver.
    this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Private constructor handling an optional permission feed: delegates to the main
 * constructor and then builds {@code resourceTokensMap} (resource id/full-name ->
 * (partition key, resource token) pairs) from the supplied permissions.
 *
 * @throws IllegalArgumentException when a permission has an unparsable resource link
 *         or the resulting token map is empty
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);

    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));

            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }

            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }

            // computeIfAbsent replaces the previous get / null-check / put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());

            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }

        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }

        // Remember the first resource token so it can serve as a default credential.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Main constructor: selects the authentication mechanism, sets up diagnostics client
 * config, session container, gateway HTTP client, global endpoint manager and retry
 * policy. On any RuntimeException during setup the client is closed before rethrowing
 * so partially-initialized resources are released.
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled,
                     CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                     ApiType apiType,
                     CosmosClientTelemetryConfig clientTelemetryConfig,
                     String clientCorrelationId,
                     CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                     SessionRetryOptions sessionRetryOptions,
                     CosmosContainerProactiveInitConfig containerProactiveInitConfig) {

    assert(clientTelemetryConfig != null);
    Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
    assert(clientTelemetryEnabled != null);

    // JVM-wide client bookkeeping: active counter, unique id, per-endpoint count.
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.incrementAndGet();
    this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
        String.format("%05d",this.clientId): clientCorrelationId;
    clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);

    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withClientMap(clientMap);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    this.throughputControlEnabled = new AtomicBoolean(false);
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
    this.sessionRetryOptions = sessionRetryOptions;

    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());

    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;

        // Authentication selection, in priority order: explicit AzureKeyCredential,
        // resource token, master key (wrapped as AzureKeyCredential), AAD TokenCredential.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                this.tokenCredentialScopes = new String[] {
                    serviceEndpoint.getScheme() + ":
                };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }

        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            // Default to direct-mode connectivity when no policy is supplied.
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }

        this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
        this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        this.diagnosticsClientConfig.withMachineId(tempMachineId);
        this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
        this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);

        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        // Session capturing is disabled unless consistency is SESSION or explicitly overridden.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;

        this.userAgentContainer = new UserAgentContainer();
        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }

        this.httpClientInterceptor = null;
        this.reactorHttpClient = httpClient();

        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
        this.apiType = apiType;
        this.clientTelemetryConfig = clientTelemetryConfig;
    } catch (RuntimeException e) {
        logger.error("unexpected failure in initializing client.", e);
        // Release any partially-created resources before propagating.
        close();
        throw e;
    }
}
// Exposes this client's diagnostics configuration.
@Override
public DiagnosticsClientConfig getConfig() {
    return diagnosticsClientConfig;
}

// Creates a new diagnostics instance using the telemetry config's sampling rate.
@Override
public CosmosDiagnostics createDiagnostics() {
    return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
/**
 * Creates the gateway service-configuration reader and derives the effective
 * multi-write setting from the latest database account.
 *
 * @throws RuntimeException when no database account has been fetched yet, which
 *         indicates the endpoint was unreachable or the auth token invalid
 */
private void initializeGatewayConfigurationReader() {
    this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
    DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
    if (databaseAccount == null) {
        logger.error("Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
        throw new RuntimeException("Client initialization failed."
            + " Check if the endpoint is reachable and if your auth token is valid. More info: https:
    }
    // Multi-write requires BOTH the client policy opt-in and account-level support.
    this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/**
 * Wires the gateway proxy with the configuration reader, metadata caches and the
 * effective multi-write setting produced during client initialization.
 */
private void updateGatewayProxy() {
    RxGatewayStoreModel proxy = this.gatewayProxy;
    proxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    proxy.setCollectionCache(this.collectionCache);
    proxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    proxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Serializes this client's collection cache into the given metadata-caches snapshot.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Sets up direct-mode connectivity: the global address resolver, the store client
 * factory and finally the server store model. Order matters - the store client
 * factory consumes the address resolver created first.
 */
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy,
        this.apiType);
    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager);
    this.createStoreModel(true);
}
/**
 * Adapts this client to the {@code DatabaseAccountManagerInternal} interface consumed
 * by the global endpoint manager; the anonymous class simply delegates back to this
 * client instance.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {
        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }

        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }

        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}
// Factory for the gateway store model; overridable in tests (package-private).
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
        sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}
/**
 * Builds the gateway HTTP client from the connection policy. When connection sharing
 * across clients is enabled, a JVM-shared instance is returned; otherwise a dedicated
 * fixed client is created and its config recorded in the diagnostics client config.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (this.connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, this.diagnosticsClientConfig);
    }

    this.diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
    return HttpClient.createFixed(httpClientConfig);
}
/**
 * Creates the direct-mode server store model backed by a new store client.
 *
 * @param subscribeRntbdStatus NOTE(review): currently unused in this body - confirm
 *        whether it is consumed elsewhere or can be removed at the call sites
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations,
        this.sessionRetryOptions);
    this.storeModel = new ServerStoreModel(storeClient);
}
// --- Simple accessors ---

@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}

@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}

@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}

@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}

@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}

@Override
public String getClientCorrelationId() {
    return this.clientCorrelationId;
}

// Returns the process-wide machine id recorded in the diagnostics config, or null
// when the config is absent.
@Override
public String getMachineId() {
    if (this.diagnosticsClientConfig == null) {
        return null;
    }

    return this.diagnosticsClientConfig.getMachineId();
}

@Override
public String getUserAgent() {
    return this.userAgentContainer.getUserAgent();
}
// Creates a database, wrapping the internal implementation with a retry policy.
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create-Database request. Serialization is timed so the duration can be
// attached to the request's diagnostics. Synchronous failures (validation, serialization) are
// surfaced as Mono.error rather than thrown.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }
        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        // Time the payload serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
        // Let the retry policy observe the request before it is sent (e.g. to capture context).
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        // Attach the serialization timing to the request's diagnostics, if available.
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a database. Obtains a session-token-reset retry policy and delegates to
 * {@code deleteDatabaseInternal}, wrapping the call so retries re-invoke the whole operation.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete-Database request for the given database link.
// Synchronous failures (e.g. empty link) are surfaced as Mono.error rather than thrown.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a database. Obtains a session-token-reset retry policy and delegates to
 * {@code readDatabaseInternal}, wrapping the call so retries re-invoke the whole operation.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read-Database request for the given database link.
// Synchronous failures (e.g. empty link) are surfaced as Mono.error rather than thrown.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of all databases under the account root. */
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link plus the target resource type to the feed/query link
 * for that resource type. Account-scoped types (Database, Offer) ignore the parent
 * link and use their fixed roots; all other types append the type's path segment
 * to the parent link.
 *
 * @throws IllegalArgumentException for resource types that have no query link.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    if (resourceTypeEnum == ResourceType.Database) {
        return Paths.DATABASES_ROOT;
    }
    if (resourceTypeEnum == ResourceType.Offer) {
        return Paths.OFFERS_ROOT;
    }
    if (resourceTypeEnum == ResourceType.DocumentCollection) {
        return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Document) {
        return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.User) {
        return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.ClientEncryptionKey) {
        return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Permission) {
        return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Attachment) {
        return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.StoredProcedure) {
        return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Trigger) {
        return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.UserDefinedFunction) {
        return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    }
    if (resourceTypeEnum == ResourceType.Conflict) {
        return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
    }
    throw new IllegalArgumentException("resource type not supported");
}
/**
 * Extracts the operation context/listener tuple from query request options;
 * returns {@code null} when no options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    return options == null
        ? null
        : ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
/**
 * Extracts the operation context/listener tuple from point-operation request options;
 * returns {@code null} when no options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
// Convenience overload: runs the query using this client itself as the diagnostics factory.
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
// Core query entry point: resolves the feed link for the resource type, picks (or generates)
// the correlation activity id, wraps execution in an InvalidPartitionException retry policy,
// and merges diagnostics captured by the scoped factory into the operation state's context
// snapshot on every terminal path (next element, error, cancellation).
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
    // Reuse the caller-provided correlation activity id when present; otherwise create one.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();
    // Shared flag so the timeout path can mark the query as cancelled for diagnostics.
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
    // Retries the query once collection cache entries are refreshed after a partition split/gone.
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);
    return
        ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy
        ).flatMap(result -> {
            // Fold per-request diagnostics into the operation-level snapshot on each page.
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            // Also merge diagnostics when the pipeline fails...
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        // ...and when the subscriber cancels.
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Builds the query execution context (pipelined when a query plan is involved) and maps its
// pages, attaching QueryInfo to every page and the query-plan diagnostics to the first page
// only. When an end-to-end latency policy is enabled, the page flux is wrapped with a timeout.
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
    // Tracks whether the page being emitted is the first one (plan diagnostics go only there).
    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }
        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // Attach query-plan diagnostics exactly once, to the first page.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });
        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
        }
        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Aggregates the diagnostics of all requests that were tracked as cancelled for this query
 * and attaches the merged result to the given exception, so the surfaced timeout/cancellation
 * error carries the full request history.
 *
 * No-op when the options carry no cancelled-request diagnostics.
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregatedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);

                    // BUGFIX: previously read from 'first' here as well (copy-paste), which
                    // merged 'first' into itself and dropped 'toBeMerged''s statistics.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);

                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get();

        BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
    }
}
// Wraps the page flux with the end-to-end operation timeout. On timeout, marks the query as
// cancelled, merges the cancelled-request diagnostics into the raised exception, and maps the
// reactor TimeoutException to a Cosmos exception.
// NOTE(review): a negative configured timeout takes the first branch and still applies
// .timeout(...) with that negative duration — presumably to force immediate cancellation with a
// dedicated exception type; confirm against the end-to-end latency policy contract.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout) {
    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                    // Preserve the original timeout stack for debuggability.
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
                    return cancellationException;
                }
                return throwable;
            });
    }
    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                CosmosException exception = new OperationCancelledException();
                // Preserve the original timeout stack for debuggability.
                exception.setStackTrace(throwable.getStackTrace());
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnostics(requestOptions, exception);
                return exception;
            }
            return throwable;
        });
}
/** Queries databases from a raw query string; delegates to the SqlQuerySpec overload. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return queryDatabases(new SqlQuerySpec(query), state);
}
/** Queries databases under the account root using the given query spec. */
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
/**
 * Creates a collection under the given database. Obtains a session-token-reset retry policy
 * and delegates to {@code createCollectionInternal}.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Create-Collection request. Serialization is timed for diagnostics,
// and on success the returned session token is recorded in the session container so
// subsequent session-consistent reads can see this write.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
        // Time the payload serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Guard against a body-less response before touching the resource —
                // consistent with replaceCollectionInternal, which applies the same check.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a collection. Obtains a session-token-reset retry policy and delegates to
 * {@code replaceCollectionInternal}.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
                                                                    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Replace-Collection request against the collection's self link.
// Serialization is timed for diagnostics; on success (when a body is returned) the session
// token is recorded so subsequent session-consistent reads can see this write.
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }
        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);
        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        // Time the payload serialization for diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Response may carry no body; only record the session token when it does.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a collection. Obtains a session-token-reset retry policy and delegates to
 * {@code deleteCollectionInternal}.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Delete-Collection request for the given collection link.
// Synchronous failures (e.g. empty link) are surfaced as Mono.error rather than thrown.
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Low-level DELETE: populates standard headers, updates the retry context end time when this
// is a retry attempt, then dispatches through the appropriate store proxy.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Low-level delete-all-items-by-partition-key: sent as a POST; populates standard headers,
// updates the retry context end time on retries, then dispatches through the store proxy.
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}
// Low-level GET: populates standard headers, updates the retry context end time on retries,
// then dispatches through the appropriate store proxy.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}
// Low-level feed read (GET): populates standard headers and dispatches through the store proxy.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}
// Low-level query (POST): populates standard headers, dispatches through the store proxy,
// and captures the response's session token before returning it.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                .map(response -> {
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            ));
}
/**
 * Reads a collection. Obtains a session-token-reset retry policy and delegates to
 * {@code readCollectionInternal}.
 */
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                 RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}
// Builds and issues the Read-Collection request for the given collection link.
// Synchronous failures (e.g. empty link) are surfaced as Mono.error rather than thrown.
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
                                                                          RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
        // Let the retry policy observe the request before it is sent.
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/** Reads the feed of all collections under the given database link. */
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}
/** Queries collections from a raw query string under the given database link. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
                                                               QueryFeedOperationState state) {
    return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/** Queries collections with the given query spec under the given database link. */
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                               SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal, e.g. {@code [p1,p2]}.
 * JsonSerializable values use their own serializer; everything else goes through the shared
 * Jackson mapper.
 *
 * @throws IllegalArgumentException when a value cannot be serialized to JSON.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    StringBuilder json = new StringBuilder("[");
    for (int i = 0; i < objectArray.size(); ++i) {
        if (i > 0) {
            json.append(',');
        }
        Object value = objectArray.get(i);
        if (value instanceof JsonSerializable) {
            json.append(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) value));
        } else {
            try {
                json.append(mapper.writeValueAsString(value));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return json.append(']').toString();
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
// Assembles the HTTP request headers for a point operation from the client-level defaults and
// the per-request options. Per-request settings override client-level ones (consistency level,
// content-response-on-write). Order matters: custom option headers are applied first so the
// explicitly-handled options below take precedence over them.
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
    Map<String, String> headers = new HashMap<>();
    if (this.useMultipleWriteLocations) {
        headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
    }
    // Client-level consistency default; may be overridden by options below.
    if (consistencyLevel != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
    }
    if (options == null) {
        // No per-request options: only the client-level content-response preference applies,
        // and only for document write operations.
        if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        return headers;
    }
    // Caller-supplied custom headers come first so the specific options below win on conflict.
    Map<String, String> customOptions = options.getHeaders();
    if (customOptions != null) {
        headers.putAll(customOptions);
    }
    // Per-request content-response-on-write overrides the client default when set.
    boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
    if (options.isContentResponseOnWriteEnabled() != null) {
        contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
    }
    if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
        headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
    }
    // Optimistic-concurrency preconditions.
    if (options.getIfMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
    }
    if (options.getIfNoneMatchETag() != null) {
        headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
    }
    if (options.getConsistencyLevel() != null) {
        headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
    }
    if (options.getIndexingDirective() != null) {
        headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
    }
    // Pre/post trigger lists are sent as comma-separated header values.
    if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
        String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
    }
    if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
        String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
        headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
    }
    if (!Strings.isNullOrEmpty(options.getSessionToken())) {
        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
    }
    if (options.getResourceTokenExpirySeconds() != null) {
        headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
            String.valueOf(options.getResourceTokenExpirySeconds()));
    }
    // Throughput: explicit offer throughput wins over legacy offer type.
    if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
        headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
    } else if (options.getOfferType() != null) {
        headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
    }
    // ThroughputProperties path: manual vs. autoscale are mutually exclusive.
    if (options.getOfferThroughput() == null) {
        if (options.getThroughputProperties() != null) {
            Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
            final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
            OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
            if (offerAutoscaleSettings != null) {
                autoscaleAutoUpgradeProperties
                    = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
            }
            // Reject configurations that mix fixed throughput with autoscale settings.
            if (offer.hasOfferThroughput() &&
                (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                    autoscaleAutoUpgradeProperties != null &&
                        autoscaleAutoUpgradeProperties
                            .getAutoscaleThroughputProperties()
                            .getIncrementPercent() >= 0)) {
                throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                    + "fixed offer");
            }
            if (offer.hasOfferThroughput()) {
                headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
            } else if (offer.getOfferAutoScaleSettings() != null) {
                headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                    ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
            }
        }
    }
    if (options.isQuotaInfoEnabled()) {
        headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
    }
    if (options.isScriptLoggingEnabled()) {
        headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
    }
    // Dedicated (integrated cache) gateway options.
    if (options.getDedicatedGatewayRequestOptions() != null) {
        if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
        }
        if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
            headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
        }
    }
    return headers;
}
/** Returns the retry-policy factory that resets session tokens on retry. */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
    return this.resetSessionTokenRetryPolicy;
}
// Resolves the target collection via the collection cache, then delegates to the synchronous
// overload to stamp partition-key information onto the request.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs
        .map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
}
// Overload taking an already-resolving collection observable; stamps partition-key information
// onto the request once the collection is available.
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(collectionValueHolder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
        return request;
    });
}
// Resolves the effective partition key for this operation and stamps it on the
// request, both as the typed PartitionKeyInternal and as the serialized
// PARTITION_KEY wire header.
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
// Resolution order: explicit PartitionKey.NONE -> explicit key from options ->
// empty key for collections without a PK definition -> extract from the payload.
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Collection is not partitioned, so the empty key is the correct value.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// Rewind before reading: the buffer may have been consumed during serialization.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Time the partition-key extraction and record it in the request's
// serialization diagnostics (when a diagnostics context is present).
Instant serializationStartTime = Instant.now();
partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
// Partitioned collection, but no key and no payload to derive one from.
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
// Builds the RxDocumentServiceRequest for a document create/upsert style
// operation: serializes the payload (recording serialization diagnostics),
// applies request options, and resolves partition-key information.
// Returns a Mono because collection resolution is asynchronous.
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Time payload serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Non-idempotent write retries only make sense for write operations and must be
// explicitly opted into via the request options.
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy observe the request before it is sent (e.g. to pin endpoints).
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the {@link RxDocumentServiceRequest} for a transactional batch and
 * resolves the target collection so batch-specific headers can be stamped on it.
 *
 * Fix: the original applied {@code setExcludeRegions} twice (before
 * {@code onBeforeSendRequest} and again after recording serialization
 * diagnostics); the redundant second application is removed.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent, may be null.
 * @param documentCollectionLink link of the target collection; must be non-empty.
 * @param serverBatchRequest the batch payload; must not be null.
 * @param options request options, may be null.
 * @param disableAutomaticIdGeneration unused for batch requests; kept for signature parity.
 * @return a Mono emitting the fully prepared request.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // Time payload serialization so it can be surfaced in request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    // Let the retry policy observe the request before it is sent.
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Applies batch-routing and batch-control headers to the request.
 *
 * Single-partition-key batches get a serialized partition-key header (mapping
 * {@code PartitionKey.NONE} to the collection's "none" key); partition-key-range
 * batches are routed by range identity instead.
 *
 * @param request the request to decorate.
 * @param serverBatchRequest the batch whose routing info is applied.
 * @param collection the resolved target collection.
 * @return the same request, decorated.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();

        PartitionKeyInternal partitionKeyInternal;
        if (partitionKey.equals(PartitionKey.NONE)) {
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(collection.getPartitionKey());
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        String rangeId = ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId();
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(rangeId));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    Map<String, String> headers = request.getHeaders();
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    headers.put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    headers.put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));

    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
    return request;
}
/**
* NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
* @param request request to populate headers to
* @param httpMethod http method
* @return Mono, which on subscription will populate the headers in the request passed in the argument.
*/
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
// The x-date header participates in the signature, so it must be set first.
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key/resource-token/token-resolver/credential auth is computed synchronously here;
// AAD token auth is resolved asynchronously in populateAuthorizationHeader below.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
// The token is URL-encoded before being placed on the wire.
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Default the content type for body-carrying verbs without clobbering an
// explicitly provided value.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
// PATCH uses the JSON-patch media type rather than plain JSON.
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range-scoped requests need range-filtering headers (which require
// collection resolution) before the authorization header can be finalized.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
/**
 * Adds the SDK supported-capabilities header unless the request already
 * carries one.
 *
 * @param request the request whose headers are augmented.
 */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
/**
 * Decides whether the request needs feed-range filtering headers: only
 * document/conflict resources, only feed/query operations, and only when a
 * feed range is actually present on the request.
 *
 * @param request the request to inspect.
 * @return true if feed-range filtering headers must be populated.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }

    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            return request.getFeedRange() != null;
        default:
            return false;
    }
}
/**
 * Populates the authorization header on the request for AAD-token auth.
 * For all other token types the request is returned unchanged (key and
 * resource-token auth is applied earlier, synchronously).
 *
 * @param request the request to authorize; must not be null.
 * @return a Mono emitting the (possibly decorated) request.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * Populates the authorization header on a raw {@link HttpHeaders} object for
 * AAD-token auth; other token types pass through unchanged.
 *
 * @param httpHeaders the headers to authorize; must not be null.
 * @return a Mono emitting the (possibly decorated) headers.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }

    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/**
 * Returns the kind of authorization token this client was configured with
 * (e.g. primary master key or AAD token).
 */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return authorizationTokenType;
}
// Computes the authorization token for a request. Credential sources are
// consulted in a fixed precedence order:
//   1. a user-supplied token resolver,
//   2. an AzureKeyCredential (master-key signature),
//   3. a single resource token supplied in place of the master key,
//   4. the per-resource token map from a permission feed.
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Properties are exposed read-only to the user-supplied resolver.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is used verbatim as the authorization value.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Database-account reads fall back to the first token from the permission feed.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps an internal {@link ResourceType} to the public {@link CosmosResourceType},
 * defaulting to {@code SYSTEM} for values with no public counterpart.
 *
 * @param resourceType the internal resource type.
 * @return the matching public resource type, never null.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
/**
 * Records the session token from a service response in the session container
 * so later session-consistent requests can replay it.
 *
 * @param request the request that produced the response.
 * @param response the service response carrying the session token header.
 */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Executes a create (POST) request: populates headers, updates the retry
 * context end time on retries, then dispatches via the store proxy.
 *
 * @param request the prepared service request.
 * @param documentClientRetryPolicy the retry policy driving this attempt.
 * @param operationContextAndListenerTuple operation context passed through to the store model.
 * @return a Mono emitting the service response.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            boolean isRetryAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetryAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Executes an upsert (POST + IS_UPSERT header): populates headers, updates the
 * retry context end time on retries, dispatches via the store proxy, and
 * captures the returned session token.
 *
 * @param request the prepared service request.
 * @param documentClientRetryPolicy the retry policy driving this attempt.
 * @param operationContextAndListenerTuple operation context passed through to the store model.
 * @return a Mono emitting the service response.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            assert (headers != null);
            // The service distinguishes upsert from create via this header.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

            boolean isRetryAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetryAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
/**
 * Executes a replace (PUT) request: populates headers, updates the retry
 * context end time on retries, then dispatches via the store proxy.
 *
 * @param request the prepared service request.
 * @param documentClientRetryPolicy the retry policy driving this attempt.
 * @return a Mono emitting the service response.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            boolean isRetryAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetryAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Executes a patch (PATCH) request: populates headers, updates the retry
 * context end time on retries, then dispatches via the store proxy.
 *
 * @param request the prepared service request.
 * @param documentClientRetryPolicy the retry policy driving this attempt.
 * @return a Mono emitting the service response.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetryAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetryAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Creates a document, wrapping the core operation with the availability
 * strategy (cross-region hedging where configured).
 *
 * @param collectionLink link of the target collection.
 * @param document the document payload.
 * @param options request options, may be null.
 * @param disableAutomaticIdGeneration whether to skip auto-generating an id.
 * @return a Mono emitting the resource response for the created document.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core create path: builds the retry policy (adding partition-key-mismatch
 * retries when no explicit partition key was supplied) and runs the internal
 * create under it.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);

    // No explicit partition key -> wrap with the PK-mismatch retry policy.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }

    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Builds and executes the create request, applying the end-to-end timeout
 * policy around the pipeline and converting the raw response into a typed
 * {@link ResourceResponse}. Synchronous failures are funneled into the Mono.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> requestMono = getCreateDocumentRequest(
            requestRetryPolicy,
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            OperationType.Create,
            clientContextOverride);

        return requestMono
            .flatMap(request -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                request,
                endToEndPolicyConfig,
                create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Wraps a response pipeline with the end-to-end operation timeout when an
 * enabled latency policy is present. A negative configured timeout fails the
 * pipeline immediately; a timeout during execution is mapped to an
 * operation-cancelled exception.
 *
 * @param request the request being executed (receives the policy and cancellation flag).
 * @param endToEndPolicyConfig the latency policy; ignored when null or disabled.
 * @param rxDocumentServiceResponseMono the underlying pipeline.
 * @return the (possibly timeout-wrapped) pipeline.
 */
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
    RxDocumentServiceRequest request,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono) {

    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        return rxDocumentServiceResponseMono;
    }

    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
    }

    request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
    return rxDocumentServiceResponseMono
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> getCancellationException(request, throwable));
}
/**
 * Converts a reactor timeout into an {@link OperationCancelledException}
 * carrying the request diagnostics; any other throwable passes through
 * unchanged (as does a timeout without a request context).
 *
 * @param request the timed-out request.
 * @param throwable the raw pipeline error.
 * @return the mapped (or original) throwable.
 */
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
    Throwable unwrapped = reactor.core.Exceptions.unwrap(throwable);
    if (!(unwrapped instanceof TimeoutException)) {
        return throwable;
    }

    CosmosException cancellation = new OperationCancelledException();
    cancellation.setStackTrace(throwable.getStackTrace());

    if (request.requestContext == null) {
        return throwable;
    }

    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
/**
 * Builds the {@link OperationCancelledException} used when a caller configures
 * a negative end-to-end timeout. Marks the request as cancelled-on-timeout and
 * attaches diagnostics when a request context is available.
 *
 * @param request the request being rejected, may be null.
 * @param negativeTimeout the offending timeout; must be negative.
 * @return the prepared exception.
 */
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");

    String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
    CosmosException exception = new OperationCancelledException(message, null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);

    if (request == null || request.requestContext == null) {
        return exception;
    }

    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
/**
 * Upserts a document, wrapping the core operation with the availability
 * strategy (cross-region hedging where configured).
 *
 * @param collectionLink link of the target collection.
 * @param document the document payload.
 * @param options request options, may be null.
 * @param disableAutomaticIdGeneration whether to skip auto-generating an id.
 * @return a Mono emitting the resource response for the upserted document.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core upsert path: builds the retry policy (adding partition-key-mismatch
 * retries when no explicit partition key was supplied) and runs the internal
 * upsert under it.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);

    // No explicit partition key -> wrap with the PK-mismatch retry policy.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }

    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Builds and executes the upsert request, applying the end-to-end timeout
 * policy around the pipeline and converting the raw response into a typed
 * {@link ResourceResponse}. Synchronous failures are funneled into the Mono.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> requestMono = getCreateDocumentRequest(
            retryPolicyInstance,
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            OperationType.Upsert,
            clientContextOverride);

        return requestMono
            .flatMap(request -> getRxDocumentServiceResponseMonoWithE2ETimeout(
                request,
                endToEndPolicyConfig,
                upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its link, wrapping the core operation
 * with the availability strategy (cross-region hedging where configured).
 *
 * @param documentLink link of the document to replace.
 * @param document the replacement payload.
 * @param options request options, may be null.
 * @return a Mono emitting the resource response for the replaced document.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core replace path (document addressed by link): builds the retry policy
 * (adding partition-key-mismatch retries when no explicit partition key was
 * supplied) and runs the internal replace under it.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);

    // No explicit partition key -> wrap with the PK-mismatch retry policy,
    // scoped to the collection derived from the document link.
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, options);
    }

    final DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            documentLink,
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Validates the inputs, converts the payload into a typed {@link Document},
 * and delegates to the link+document replace overload. Synchronous failures
 * are funneled into the Mono.
 *
 * Fix: the catch block previously logged only {@code e.getMessage()} and
 * dropped the exception itself; it now passes {@code e} so the stack trace is
 * preserved, consistent with the sibling create/upsert internals.
 *
 * @param documentLink link of the document to replace; must be non-empty.
 * @param document the replacement payload; must not be null.
 * @param options request options, may be null.
 * @param retryPolicyInstance the retry policy driving this attempt.
 * @param endToEndPolicyConfig end-to-end latency policy, may be null.
 * @param clientContextOverride diagnostics client context override.
 * @return a Mono emitting the resource response for the replaced document.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document addressed by its self link, wrapping the core operation
 * with the availability strategy (cross-region hedging where configured).
 *
 * @param document the document (carrying its self link) to replace.
 * @param options request options, may be null.
 * @return a Mono emitting the resource response for the replaced document.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
// Core replace path for a document addressed by its self link: builds the retry
// policy and runs the internal replace under it.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
Document document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
// NOTE(review): the document's self link is passed as the "collectionLink" to
// PartitionKeyMismatchRetryPolicy, whereas the String-link overload derives the
// collection name via Utils.getCollectionName(documentLink). Confirm this is
// intentional (or that the policy tolerates a document-level link).
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
/**
 * Validates the document and delegates to the link+document replace overload
 * using the document's self link. Synchronous failures are funneled into the
 * Mono.
 *
 * Fixes: the failure log previously said "replacing a database" in this
 * document-replace path, and dropped the exception object; the message now
 * names the right resource and passes {@code e} so the stack trace is
 * preserved, consistent with the sibling internals.
 *
 * @param document the document (carrying its self link) to replace; must not be null.
 * @param options request options, may be null.
 * @param retryPolicyInstance the retry policy driving this attempt.
 * @param endToEndPolicyConfig end-to-end latency policy, may be null.
 * @param clientContextOverride diagnostics client context override.
 * @return a Mono emitting the resource response for the replaced document.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Builds and executes the replace request: serializes the payload (recording
// serialization diagnostics), stamps partition-key information, and wraps the
// pipeline with the end-to-end timeout policy.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Time payload serialization so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
if (options != null) {
// A tracking id from the options is embedded in the document before serialization.
String trackingId = options.getTrackingId();
if (trackingId != null && !trackingId.isEmpty()) {
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
// Note: 'req' and 'request' are the same instance — addPartitionKeyInformation
// returns the request it decorated — so using 'request' inside the lambda is safe.
return requestObs.flatMap(req -> {
Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
});
}
/**
 * Resolves the end-to-end latency policy from the request options, falling
 * back to the client-level default when the options carry none.
 *
 * @param options request options, may be null.
 * @return the effective latency policy (may still be null if neither is set).
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null;
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Returns the given policy when present; otherwise the client-level default.
 *
 * @param policyConfig a request-level policy, may be null.
 * @return the effective policy (request-level wins over client-level).
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies patch operations to a document, wrapping the core operation with
 * the availability strategy (cross-region hedging where configured).
 *
 * @param documentLink link of the document to patch.
 * @param cosmosPatchOperations the patch operations to apply.
 * @param options request options, may be null.
 * @return a Mono emitting the resource response for the patched document.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Wires a retry policy around the internal patch implementation.
 *
 * @param documentLink link of the document to patch.
 * @param cosmosPatchOperations the patch operations to apply.
 * @param options request options; may be {@code null}.
 * @param endToEndPolicyConfig effective end-to-end latency policy config.
 * @param clientContextOverride diagnostics client context for this attempt.
 * @return a Mono emitting the patched document's resource response.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // One retry-policy instance per logical operation; shared between the request
    // factory and the retry driver so per-operation state is retained across retries.
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);

    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(
            documentLink,
            cosmosPatchOperations,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Core patch implementation: serializes the patch operations to the request body,
 * builds the service request, resolves collection/partition-key information, and
 * executes the call under the end-to-end timeout policy.
 *
 * @param documentLink non-empty link of the document to patch.
 * @param cosmosPatchOperations non-null patch operations to apply.
 * @param options request options; may be {@code null}.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be {@code null}.
 * @param endToEndPolicyConfig effective end-to-end latency policy config.
 * @param clientContextOverride diagnostics client context used to create the request.
 * @return a Mono emitting the patched document's resource response.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Serialization is timed so it can be reported in the request diagnostics below.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
// Non-idempotent write retries and region exclusion are both opt-in via options.
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
// Attach the serialization timing captured above to this request's diagnostics.
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
// NOTE(review): the outer 'request' is used here rather than 'req'; this is only
// equivalent if addPartitionKeyInformation mutates and returns the same instance —
// confirm against its implementation.
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document, routing the operation through the availability strategy.
 * This overload carries no item payload (no content-based routing input).
 *
 * @param documentLink link of the document to delete.
 * @param options request options; may be {@code null}.
 * @return a Mono emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    // null InternalObjectNode: no document body is available on this overload.
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, effectiveConfig, clientContext) -> deleteDocumentCore(
            documentLink,
            null,
            requestOptions,
            effectiveConfig,
            clientContext),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Deletes a document, routing the operation through the availability strategy.
 * The supplied {@code internalObjectNode} is forwarded so partition-key
 * information can be extracted from the item when needed.
 *
 * @param documentLink link of the document to delete.
 * @param internalObjectNode the item payload used for partition-key resolution.
 * @param options request options; may be {@code null}.
 * @return a Mono emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (requestOptions, effectiveConfig, clientContext) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            requestOptions,
            effectiveConfig,
            clientContext),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Wires a retry policy around the internal delete implementation.
 *
 * @param documentLink link of the document to delete.
 * @param internalObjectNode optional item payload used for partition-key resolution.
 * @param options request options; may be {@code null}.
 * @param endToEndPolicyConfig effective end-to-end latency policy config.
 * @param clientContextOverride diagnostics client context for this attempt.
 * @return a Mono emitting the delete response.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Shared between the request factory and the retry driver (see patchDocumentCore).
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);

    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Core delete implementation: builds the service request, resolves
 * collection/partition-key information, and executes the delete under the
 * end-to-end timeout policy.
 *
 * @param documentLink non-empty link of the document to delete.
 * @param internalObjectNode optional item payload used for partition-key resolution.
 * @param options request options; may be {@code null}.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be {@code null}.
 * @param endToEndPolicyConfig effective end-to-end latency policy config.
 * @param clientContextOverride diagnostics client context used to create the request.
 * @return a Mono emitting the delete response, or an error Mono if request
 *         construction fails (synchronous failures are converted via Mono.error).
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
// Non-idempotent write retries and region exclusion are both opt-in via options.
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request, null, internalObjectNode, options, collectionObs);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(req -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
// Surface synchronous construction failures through the reactive pipeline.
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Deletes all documents sharing a logical partition key value.
 *
 * NOTE(review): the {@code partitionKey} parameter is not consumed here; the
 * partition key is presumably carried inside {@code options} — confirm against callers.
 *
 * @param collectionLink link of the collection to delete from.
 * @param partitionKey the logical partition key value (see note above).
 * @param options request options; may be {@code null}.
 * @return a Mono emitting the delete-by-partition-key response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of delete-by-partition-key: builds a PartitionKey-typed
 * delete request, resolves the collection and partition-key routing, and executes.
 *
 * @param collectionLink non-empty link of the collection.
 * @param options request options; the partition key is expected to be carried here.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be {@code null}.
 * @return a Mono emitting the delete response, or an error Mono on synchronous failure.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
// ResourceType.PartitionKey: this is a partition-level delete, not a document delete.
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
// Surface synchronous construction failures through the reactive pipeline.
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Reads a single document.
 *
 * @param documentLink link of the document to read.
 * @param options request options; may be {@code null}.
 * @return a Mono emitting the document's resource response.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    // Delegate to the private overload, using this client as the diagnostics factory.
    return this.readDocument(documentLink, options, this);
}
/**
 * Reads a single document through the availability strategy, using the supplied
 * diagnostics factory for the inner attempts.
 *
 * @param documentLink link of the document to read.
 * @param options request options; may be {@code null}.
 * @param innerDiagnosticsFactory diagnostics client context factory for inner requests.
 * @return a Mono emitting the document's resource response.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    // Reads are idempotent, so non-idempotent write retries never apply (false below).
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (requestOptions, effectiveConfig, clientContext) ->
            readDocumentCore(documentLink, requestOptions, effectiveConfig, clientContext),
        options,
        false,
        innerDiagnosticsFactory
    );
}
/**
 * Wires a retry policy around the internal read implementation.
 *
 * @param documentLink link of the document to read.
 * @param options request options; may be {@code null}.
 * @param endToEndPolicyConfig effective end-to-end latency policy config.
 * @param clientContextOverride diagnostics client context for this attempt.
 * @return a Mono emitting the document's resource response.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
/**
 * Core read implementation: builds the service request, resolves
 * collection/partition-key information, and executes the read under the
 * end-to-end timeout policy.
 *
 * @param documentLink non-empty link of the document to read.
 * @param options request options; may be {@code null}.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be {@code null}.
 * @param endToEndPolicyConfig effective end-to-end latency policy config.
 * @param clientContextOverride diagnostics client context used to create the request.
 * @return a Mono emitting the document's resource response, or an error Mono on
 *         synchronous construction failure.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // BUGFIX: guard against a null 'options' before dereferencing, consistent with
        // the patch/delete code paths which all check "options != null" first. The
        // previous code called options.getExcludeRegions() unconditionally and would
        // throw NullPointerException for callers passing null options.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        // Surface synchronous construction failures through the reactive pipeline.
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads all documents in a collection by issuing an unfiltered query.
 *
 * @param collectionLink non-empty link of the collection.
 * @param state feed-operation state carrying query options and diagnostics hooks.
 * @param classOfT target deserialization type.
 * @return a Flux of feed responses containing all documents.
 * @throws IllegalArgumentException if {@code collectionLink} is empty.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isNotEmpty(collectionLink)) {
        // A full read-feed is modeled as an unfiltered cross-partition query.
        return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
    }
    throw new IllegalArgumentException("collectionLink");
}
/**
 * Reads many items identified by (id, partition key) pairs in a single logical
 * operation. Items that map alone onto a physical partition are fetched via point
 * reads; partitions holding multiple requested items are fetched via a generated
 * IN/OR query. Results from both paths are merged into one FeedResponse with
 * aggregated request charge, query metrics, and diagnostics.
 *
 * @param itemIdentityList identities (id + partition key) of the items to read.
 * @param collectionLink link of the target collection.
 * @param state feed-operation state carrying query options and diagnostics hooks.
 * @param klass target deserialization type.
 * @return a Mono emitting one merged FeedResponse with all found items.
 */
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
// Scoped factory collects diagnostics from all inner point reads/queries so they
// can be merged into the caller's diagnostics context at the end.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
// Bucket the requested identities by the physical partition range that
// owns each item's effective partition key.
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
itemIdentityList
.forEach(itemIdentity -> {
// For hierarchical (multi-hash) partition keys all path components
// must be supplied; partial keys cannot be routed.
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Ranges with >1 item get a generated query; single-item ranges are
// served by point reads (see pointReadsForReadMany).
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
// Merge both execution paths and fold every page into a single response.
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
// Record the overall operation (status 200) against the caller's
// diagnostics context when one is available.
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
// On failure, record the failed operation with its status/charge so the
// diagnostics context reflects the error, then rethrow unchanged.
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
/**
 * Builds, per physical partition range, the read-many query to execute for that
 * range. Only ranges holding more than one requested identity get a query; ranges
 * with a single identity are served via point reads elsewhere.
 *
 * @param partitionRangeItemKeyMap requested identities bucketed by partition range.
 * @param partitionKeyDefinition the collection's partition key definition.
 * @return a map from partition range to the query spec covering its identities.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    final Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();
    final String pkSelector = createPkSelector(partitionKeyDefinition);

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry : partitionRangeItemKeyMap.entrySet()) {
        final List<CosmosItemIdentity> identities = entry.getValue();
        if (identities.size() <= 1) {
            continue; // single-item ranges are handled by point reads
        }
        final SqlQuerySpec spec;
        if (pkSelector.equals("[\"id\"]")) {
            // Partition key path is /id: the id filter alone is sufficient.
            spec = createReadManyQuerySpecPartitionKeyIdSame(identities, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            spec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            spec = createReadManyQuerySpec(identities, pkSelector);
        }
        queriesByRange.put(entry.getKey(), spec);
    }
    return queriesByRange;
}
/**
 * Builds a read-many query of the form
 * {@code SELECT * FROM c WHERE c.id IN ( @param0, @param1, ... )}
 * for the case where the partition key path is /id, so the id filter alone
 * uniquely identifies the requested documents.
 *
 * @param idPartitionKeyPairList identities of the documents to fetch.
 * @param partitionKeySelector unused here by design (the id IS the partition key).
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    final int count = idPartitionKeyPairList.size();

    for (int index = 0; index < count; index++) {
        final String paramName = "@param" + index;
        parameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(index).getId()));
        if (index > 0) {
            query.append(", ");
        }
        query.append(paramName);
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds a read-many query of the form
 * {@code SELECT * FROM c WHERE ( (c.id = @p1 AND c[pk] = @p0) OR ... )}
 * for a single-path (non-hierarchical) partition key.
 *
 * @param itemIdentities identities of the documents to fetch.
 * @param partitionKeySelector bracketed partition-key selector from createPkSelector.
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    final int count = itemIdentities.size();

    for (int index = 0; index < count; index++) {
        final CosmosItemIdentity identity = itemIdentities.get(index);
        // Two parameters per identity: even slot = partition key, odd slot = id.
        final String pkParamName = "@param" + (2 * index);
        final String idParamName = "@param" + (2 * index + 1);
        parameters.add(new SqlParameter(pkParamName,
            ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));

        if (index > 0) {
            query.append(" OR ");
        }
        query.append("(")
             .append("c.id = ")
             .append(idParamName)
             .append(" AND ")
             .append(" c")
             .append(partitionKeySelector)
             .append(" = ")
             .append(pkParamName)
             .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds a read-many query for a hierarchical (multi-hash) partition key:
 * each identity contributes an OR'd clause matching c.id plus every partition
 * key path component.
 *
 * NOTE(review): the partition key value is obtained as a single String and split
 * on '=' to recover the per-path components — this assumes the components
 * themselves never contain '='; confirm the upstream encoding guarantees this.
 *
 * @param itemIdentities identities of the documents to fetch.
 * @param partitionKeyDefinition the collection's (multi-path) partition key definition.
 * @return the parameterized query spec.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
List<CosmosItemIdentity> itemIdentities,
PartitionKeyDefinition partitionKeyDefinition) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE ( ");
// Parameter names are globally sequential across identities and pk components.
int paramCount = 0;
for (int i = 0; i < itemIdentities.size(); i++) {
CosmosItemIdentity itemIdentity = itemIdentities.get(i);
PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
String pkValueString = (String) pkValue;
// Pairs of (partition key path, parameter name), one per pk component.
List<List<String>> partitionKeyParams = new ArrayList<>();
List<String> paths = partitionKeyDefinition.getPaths();
int pathCount = 0;
for (String subPartitionKey: pkValueString.split("=")) {
String pkParamName = "@param" + paramCount;
partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
parameters.add(new SqlParameter(pkParamName, subPartitionKey));
paramCount++;
pathCount++;
}
String idValue = itemIdentity.getId();
String idParamName = "@param" + paramCount;
paramCount++;
parameters.add(new SqlParameter(idParamName, idValue));
queryStringBuilder.append("(");
queryStringBuilder.append("c.id = ");
queryStringBuilder.append(idParamName);
for (List<String> pkParam: partitionKeyParams) {
queryStringBuilder.append(" AND ");
queryStringBuilder.append(" c.");
// substring(1) strips the leading '/' from the partition key path.
queryStringBuilder.append(pkParam.get(0).substring(1));
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParam.get(1));
}
queryStringBuilder.append(" )");
if (i < itemIdentities.size() - 1) {
queryStringBuilder.append(" OR ");
}
}
queryStringBuilder.append(" )");
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
// Builds the bracketed partition-key selector used when composing read-many
// queries, e.g. paths ["/a", "/b"] -> ["a"]["b"] (leading '/' stripped per path).
// NOTE(review): the replacement of '"' with '\' below looks like an incomplete
// escaping step (the conventional form would replace '"' with '\"') — confirm
// the intended behavior before changing it.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
return partitionKeyDefinition.getPaths()
.stream()
.map(pathPart -> StringUtils.substring(pathPart, 1))
.map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
.map(part -> "[\"" + part + "\"]")
.collect(Collectors.joining());
}
/**
 * Executes the per-partition-range queries generated for a read-many call.
 * Returns an empty Flux when there are no multi-item ranges to query.
 *
 * @param diagnosticsFactory scoped diagnostics factory shared with the point reads.
 * @param parentResourceLink query link of the parent collection.
 * @param sqlQuery placeholder query spec (per-range specs come from rangeQueryMap).
 * @param options query request options.
 * @param klass target deserialization type.
 * @param resourceTypeEnum resource type being queried (Document).
 * @param collection the resolved collection.
 * @param rangeQueryMap the per-range query specs built by getRangeQueryMap.
 * @return a Flux of feed responses, one stream element per result page.
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
String parentResourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
DocumentCollection collection,
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
if (rangeQueryMap.isEmpty()) {
return Flux.empty();
}
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
diagnosticsFactory,
queryClient,
collection.getResourceId(),
sqlQuery,
rangeQueryMap,
options,
collection.getResourceId(),
parentResourceLink,
activityId,
klass,
resourceTypeEnum,
isQueryCancelledOnTimeout);
return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Serves the single-item partition buckets of a read-many call via point reads,
 * converting each read result (or 404/0 miss) into a single-page FeedResponse so
 * it can be merged with the query results.
 *
 * A 404 with sub-status UNKNOWN (plain "not found") is translated into an empty
 * feed page rather than an error; any other failure propagates.
 *
 * @param diagnosticsFactory scoped diagnostics factory shared with the queries.
 * @param singleItemPartitionRequestMap identities bucketed by partition range;
 *        only buckets of exactly one identity are point-read here.
 * @param resourceLink link prefix the item id is appended to.
 * @param queryRequestOptions options converted to per-read RequestOptions.
 * @param klass target deserialization type.
 * @return a Flux of single-page feed responses, one per point read.
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
String resourceLink,
CosmosQueryRequestOptions queryRequestOptions,
Class<T> klass) {
return Flux.fromIterable(singleItemPartitionRequestMap.values())
.flatMap(cosmosItemIdentityList -> {
if (cosmosItemIdentityList.size() == 1) {
CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
RequestOptions requestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(queryRequestOptions);
requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
// Pair encodes either a successful response (left) or a tolerated
// not-found exception (right); exactly one side is non-null.
return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
.flatMap(resourceResponse -> Mono.just(
new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
))
.onErrorResume(throwable -> {
Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
if (unwrappedThrowable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) unwrappedThrowable;
int statusCode = cosmosException.getStatusCode();
int subStatusCode = cosmosException.getSubStatusCode();
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
}
}
return Mono.error(unwrappedThrowable);
});
}
// Multi-item buckets are handled by queryForReadMany, not here.
return Mono.empty();
})
.flatMap(resourceResponseToExceptionPair -> {
ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
FeedResponse<Document> feedResponse;
if (cosmosException != null) {
// Not-found: emit an empty page but keep the request's diagnostics.
feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
} else {
CosmosItemResponse<T> cosmosItemResponse =
ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
feedResponse = ModelBridgeInternal.createFeedResponse(
Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
cosmosItemResponse.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
}
return Mono.just(feedResponse);
});
}
/**
 * Queries documents using a raw query string.
 *
 * @param collectionLink link of the collection to query.
 * @param query the raw SQL query text.
 * @param state feed-operation state carrying query options and diagnostics hooks.
 * @param classOfT target deserialization type.
 * @return a Flux of feed responses.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    // Wrap the raw query text and reuse the SqlQuerySpec overload.
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Creates the IDocumentQueryClient facade the query pipeline uses to reach this
 * client's caches, retry policies, and request execution, optionally routing
 * request/response/exception events to an operation listener.
 *
 * NOTE(review): the {@code rxDocumentClientImpl} parameter is not referenced —
 * the anonymous class captures {@code RxDocumentClientImpl.this} directly.
 *
 * @param rxDocumentClientImpl unused (see note above).
 * @param operationContextAndListenerTuple optional listener + operation context;
 *        when non-null, query executions are reported to the listener.
 * @return a query-client facade backed by this document client.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
// Report the request, its response, or its failure to the listener,
// correlating via the operation context's activity id header.
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
// NOTE(review): intentionally(?) unimplemented — returns null rather than
// throwing UnsupportedOperationException; confirm callers never invoke this.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
/**
 * Queries documents using a parameterized query spec.
 *
 * @param collectionLink link of the collection to query.
 * @param querySpec the parameterized SQL query.
 * @param state feed-operation state carrying query options and diagnostics hooks.
 * @param classOfT target deserialization type.
 * @return a Flux of feed responses.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    // Log the query text before handing it to the query pipeline.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Queries the change feed of a collection.
 *
 * @param collection the resolved target collection; must not be null.
 * @param changeFeedOptions change-feed request options.
 * @param classOfT target deserialization type.
 * @return a Flux of change-feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    // Paging and continuation handling live in the change-feed query implementation.
    final ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Queries the change feed using options held by a paged-flux operation state.
 *
 * @param collection the resolved target collection.
 * @param state operation state carrying the change-feed request options.
 * @param classOfT target deserialization type.
 * @return a Flux of change-feed pages.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    // Unwrap the options from the state and delegate to the options-based overload.
    final CosmosChangeFeedRequestOptions requestOptions = state.getChangeFeedOptions();
    return queryDocumentChangeFeed(collection, requestOptions, classOfT);
}
// Reads every document of one logical partition by issuing a partition-scoped query
// against the physical partition owning the given partition key. Supports the
// cross-region speculation ("availability strategy") path when the end-to-end
// latency policy makes more than one region applicable.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone the caller's query options so later mutations (e.g. pinning a PK range id
// below) do not leak back into the caller-owned state.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
// Determine whether speculative execution across regions applies to this query.
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
DiagnosticsClientContext effectiveClientContext;
ScopedDiagnosticsFactory diagnosticsFactory;
if (orderedApplicableRegionsForSpeculation.size() < 2) {
// Single-region execution: no scoped diagnostics needed.
effectiveClientContext = this;
diagnosticsFactory = null;
} else {
// Multi-region speculation: collect diagnostics in a scoped factory so they can
// be reset/merged as the hedged operations complete.
diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
effectiveClientContext = diagnosticsFactory;
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
effectiveClientContext,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
// Resolve the collection first; the PK definition is needed to build the query.
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Build a "SELECT * WHERE pk = ..." style query for the single logical partition.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retry on InvalidPartition (partition moved/split) by refreshing the collection cache.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
// Look up the routing map to find which physical partition owns this PK.
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
// NOTE(review): setPartitionKeyRangeIdInternal mutates effectiveOptions in place;
// safe here only because effectiveOptions is a clone — confirm if refactoring.
return createQueryInternal(
effectiveClientContext,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Speculation path: merge scoped diagnostics into the request options on every
// terminal signal (next page, error, cancellation).
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    // Expose the client-level cache of partitioned query execution plans.
    return this.queryPlanCache;
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    // Guard clause: a target collection is required.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    // Guard clause: a target collection is required.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
// Builds a service request targeting the stored-procedure feed of the given collection.
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
// Builds a service request targeting the UDF feed of the given collection.
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    final String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, path, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Creates a stored procedure under the given collection; errors during request
// construction are surfaced as an error Mono rather than thrown synchronously.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        final RxDocumentServiceRequest request =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing stored procedure (addressed via its self-link).
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        final String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a stored procedure addressed by its link.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String path = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.StoredProcedure, path, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
// Reads a stored procedure addressed by its link.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        final String path = Utils.joinPath(storedProcedureLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.StoredProcedure, path, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {
    // Guard clause: a target collection is required.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    // Convert the raw query text to a spec and reuse the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Delegate to the generic query pipeline for the stored-procedure resource type.
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
// Executes a stored procedure (ExecuteJavaScript) with serialized parameters and
// captures the returned session token. Construction errors become an error Mono.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
// Sproc execution responses are JSON payloads.
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
// Empty body when there are no parameters; otherwise JSON-serialize them.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda parameter 'req' is ignored and the outer 'request' is used;
// this presumes addPartitionKeyInformation mutates and returns the same instance —
// confirm req == request before refactoring.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
// Persist the session token from the response for session consistency.
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Executes a transactional batch: build the request, send it, parse the response.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        return getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration)
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
// Creates a trigger under the given collection.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        final RxDocumentServiceRequest request =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Builds a service request targeting the trigger feed of the given collection.
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    final String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, path, trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing trigger (addressed via its self-link).
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        final String path = Utils.joinPath(trigger.getSelfLink(), null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.Trigger, path, trigger, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a trigger addressed by its link.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        final String path = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.Trigger, path, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
// Reads a trigger addressed by its link.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        final String path = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers =
            getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.Trigger, path, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    // Guard clause: a target collection is required.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    // Convert the raw query text to a spec and reuse the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 QueryFeedOperationState state) {
    // Delegate to the generic query pipeline for the trigger resource type.
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
// Creates a user-defined function under the given collection.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        final RxDocumentServiceRequest request =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
// Replaces an existing user-defined function (addressed via its self-link).
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
                                                                                       RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        final String path = Utils.joinPath(udf.getSelfLink(), null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a user-defined function addressed by its link.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
                                                                                      RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        final String path = Utils.joinPath(udfLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Delete, ResourceType.UserDefinedFunction, path, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
// Reads a user-defined function addressed by its link.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
                                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        final String path = Utils.joinPath(udfLink, null);
        final Map<String, String> headers =
            this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this, OperationType.Read, ResourceType.UserDefinedFunction, path, headers, options);
        // Let the retry policy capture the request before it is sent.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        QueryFeedOperationState state) {
    // Guard clause: a target collection is required.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    // Convert the raw query text to a spec and reuse the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    // Delegate to the generic query pipeline for the UDF resource type.
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Reads a conflict resource; conflicts are partitioned, so partition-key
// information is resolved asynchronously before the request is sent.
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda parameter 'req' is ignored and the outer 'request' is used;
// this presumes addPartitionKeyInformation mutates and returns the same instance —
// confirm req == request before refactoring.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    // Guard clause: a target collection is required.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   QueryFeedOperationState state) {
    // Convert the raw query text to a spec and reuse the spec-based overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   QueryFeedOperationState state) {
    // Delegate to the generic query pipeline for the conflict resource type.
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Wrap the internal call with a retry policy that resets the session token on failure.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a Conflict resource. Partition-key info is resolved first; validation
// failures are returned as Mono.error.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
// Conflicts are partition-scoped: attach partition-key info before sending.
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Creates a user under the given database, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create-User request; argument validation happens inside
// getUserRequest, and any synchronous failure is surfaced as Mono.error.
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Upserts a user under the given database, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert-User request; lets the retry policy inspect the
// request before sending. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Validates inputs and builds an RxDocumentServiceRequest targeting the users
// feed of the given database for the given operation type.
// Throws IllegalArgumentException when databaseLink is empty or user is null.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (user == null) {
throw new IllegalArgumentException("user");
}
RxDocumentClientImpl.validateResource(user);
String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.User, path, user, requestHeaders, options);
}
// Replaces an existing user, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
// Replaces a User resource addressed by its self-link. Validation failures
// surface as Mono.error.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (user == null) {
throw new IllegalArgumentException("user");
}
logger.debug("Replacing a User. user id [{}]", user.getId());
RxDocumentClientImpl.validateResource(user);
// Replace targets the resource's own self-link, not the users feed.
String path = Utils.joinPath(user.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Deletes the user addressed by userLink, retrying per the session-token-reset policy.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a User resource by link. Validation failures surface as Mono.error.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads the user addressed by userLink, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
// Reads a User resource by link. Validation failures surface as Mono.error.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Enumerates all users under the given database as a paged feed.
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String usersFeedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, usersFeedPath);
}
// Convenience overload: wraps the raw query text and delegates to the SqlQuerySpec variant.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, state);
}
// Executes a SQL query over the users feed of a database.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           QueryFeedOperationState state) {
    return createQuery(
        databaseLink,
        querySpec,
        state,
        User.class,
        ResourceType.User);
}
// Reads a client-encryption key by link, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
// Reads a ClientEncryptionKey resource by link. Validation failures surface as Mono.error.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Creates a client-encryption key under the given database, with session-token-reset retries.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Create request for a client-encryption key; argument
// validation happens inside getClientEncryptionKeyRequest.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Validates inputs and builds an RxDocumentServiceRequest targeting the
// client-encryption-keys feed of the given database for the given operation type.
// Throws IllegalArgumentException when databaseLink is empty or the key is null.
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
// Replaces a client-encryption key addressed by its name-based link, with
// session-token-reset retries.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
// Replaces a ClientEncryptionKey resource addressed by its name-based link.
// Validation failures surface as Mono.error.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
// NOTE(review): nameBasedLink is not checked for null/empty here, unlike the
// link parameters of sibling methods — presumably callers guarantee it.
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Enumerates all client-encryption keys under the given database as a paged feed.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String keysFeedPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, keysFeedPath);
}
// Executes a SQL query over the client-encryption-keys feed of a database.
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    return createQuery(
        databaseLink,
        querySpec,
        state,
        ClientEncryptionKey.class,
        ResourceType.ClientEncryptionKey);
}
// Creates a permission under the given user, retrying per the session-token-reset policy.
//
// Fix: previously a SECOND, unrelated retry-policy instance was created and handed
// to inlineIfPossibleAsObs while the internal call captured the first one. Retry
// decisions were therefore driven by a policy object that never saw the request
// (onBeforeSendRequest is invoked on the captured instance). Every sibling method
// (upsertPermission, createUser, ...) reuses one instance for both roles; do the same.
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
    RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
// Builds and issues the Create-Permission request; argument validation happens
// inside getPermissionRequest. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Upserts a permission under the given user, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
// Builds and issues the Upsert-Permission request; lets the retry policy inspect
// the request before sending. Synchronous failures surface as Mono.error.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Validates inputs and builds an RxDocumentServiceRequest targeting the
// permissions feed of the given user for the given operation type.
// Throws IllegalArgumentException when userLink is empty or permission is null.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
// Replaces an existing permission, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
// Replaces a Permission resource addressed by its self-link. Validation failures
// surface as Mono.error.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
// Replace targets the resource's own self-link, not the permissions feed.
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Deletes the permission addressed by permissionLink, retrying per the
// session-token-reset policy.
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Deletes a Permission resource by link. Validation failures surface as Mono.error.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads the permission addressed by permissionLink, retrying per the
// session-token-reset policy.
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, retryPolicy),
        retryPolicy);
}
// Reads a Permission resource by link. Validation failures surface as Mono.error.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Enumerates all permissions under the given user as a paged feed.
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    final String permissionsFeedPath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, permissionsFeedPath);
}
// Convenience overload: wraps the raw query text and delegates to the SqlQuerySpec variant.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
                                                       QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryPermissions(userLink, spec, state);
}
// Executes a SQL query over the permissions feed of a user.
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                       QueryFeedOperationState state) {
    return createQuery(
        userLink,
        querySpec,
        state,
        Permission.class,
        ResourceType.Permission);
}
// Replaces an offer (throughput resource), retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, retryPolicy),
        retryPolicy);
}
// Replaces an Offer resource addressed by its self-link. Offers take no
// RequestOptions, hence the null headers/options. Validation failures surface
// as Mono.error.
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Reads the offer addressed by offerLink, retrying per the session-token-reset policy.
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, retryPolicy),
        retryPolicy);
}
// Reads an Offer resource by link. Offers take no RequestOptions, hence the
// null headers/options. Validation failures surface as Mono.error.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
// Cast disambiguates the create(...) overload for the null headers argument.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Enumerates all offers for the account as a paged feed.
@Override
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
    final String offersFeedPath = Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null);
    return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, offersFeedPath);
}
// Overload taking the full operation state: unwraps its query options and delegates.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    QueryFeedOperationState state,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final CosmosQueryRequestOptions queryOptions = state.getQueryOptions();
    return nonDocumentReadFeed(queryOptions, resourceType, klass, resourceLink);
}
// Wraps the internal read-feed pipeline with a fresh session-token-reset retry policy.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    final DocumentClientRetryPolicy feedRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, feedRetryPolicy),
        feedRetryPolicy);
}
// Paged ReadFeed over a non-document resource type. Builds one request per page
// (carrying the continuation token and page size) and drives pagination through
// Paginator; must not be used for documents (see assert).
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 means "server default page size".
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
assert(resourceType != ResourceType.Document);
// Factory invoked once per page: continuation token is null for the first page.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
// Executes one page request and converts the raw response into a typed feed page.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
// Convenience overload: wraps the raw query text and delegates to the SqlQuerySpec variant.
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryOffers(spec, state);
}
// Executes a SQL query over the account-level offers feed (no parent link).
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(
        null,
        querySpec,
        state,
        Offer.class,
        ResourceType.Offer);
}
// Fetches the account-level DatabaseAccount resource, retrying per the
// session-token-reset policy.
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(retryPolicy),
        retryPolicy);
}
// Reads the DatabaseAccount resource from the service root ("" path).
// Synchronous failures surface as Mono.error.
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
// Cast disambiguates the create(...) overload for the null headers argument.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
// Returns the session container tracking per-partition session tokens.
// Typed as Object to keep the interface decoupled from SessionContainer.
public Object getSession() {
return this.sessionContainer;
}
// Replaces the session container; the argument must be a SessionContainer
// (the unchecked cast throws ClassCastException otherwise).
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
// Exposes the client's collection metadata cache.
@Override
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
// Exposes the client's partition-key-range cache.
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
// Exposes the global endpoint manager used for multi-region routing.
@Override
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
// Builds an AddressSelector over the client's address resolver and configured
// protocol. Note: a NEW instance is created on every call.
@Override
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the DatabaseAccount resource from a SPECIFIC endpoint (used e.g. to
// probe individual regions), bypassing normal endpoint selection via an
// endpoint override on the request. Also refreshes the cached
// useMultipleWriteLocations flag from the returned account. Deferred so the
// request is rebuilt on each subscription/retry.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
// Multi-write is effective only when both the client policy and the
// account capability allow it.
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
* Selects the store model (direct store vs. gateway) for a request.
* Certain requests must be routed through gateway even when the client connectivity mode is direct:
* master-resource writes/deletes (databases, users, collections, permissions),
* offers, client encryption keys, non-execute script operations, partition key
* ranges, partition-key deletes, collection replace/read, and cross-partition
* queries/read-feeds that carry neither a partition key nor a range identity.
*
* @param request the request to route
* @return RxStoreModel the proxy to dispatch the request through
*/
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
// Explicit per-request override wins.
if (request.useGatewayMode) {
return this.gatewayProxy;
}
ResourceType resourceType = request.getResourceType();
OperationType operationType = request.getOperationType();
// Resource types that are always gateway-routed regardless of operation.
if (resourceType == ResourceType.Offer ||
resourceType == ResourceType.ClientEncryptionKey ||
resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
resourceType == ResourceType.PartitionKeyRange ||
resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
return this.gatewayProxy;
}
if (operationType == OperationType.Create
|| operationType == OperationType.Upsert) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection ||
resourceType == ResourceType.Permission) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Delete) {
if (resourceType == ResourceType.Database ||
resourceType == ResourceType.User ||
resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Replace) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else if (operationType == OperationType.Read) {
if (resourceType == ResourceType.DocumentCollection) {
return this.gatewayProxy;
} else {
return this.storeModel;
}
} else {
// Queries/read-feeds over collection children with no partition key and no
// range identity need gateway-side fan-out/routing.
if ((operationType == OperationType.Query ||
operationType == OperationType.SqlQuery ||
operationType == OperationType.ReadFeed) &&
Utils.isCollectionChild(request.getResourceType())) {
if (request.getPartitionKeyRangeIdentity() == null &&
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
return this.gatewayProxy;
}
}
return this.storeModel;
}
}
/**
 * Closes this client and releases all owned resources (endpoint manager, store client
 * factory, HTTP client, CPU monitor registration and, when enabled, the throughput
 * control store). Idempotent: only the first call performs the shutdown; subsequent
 * calls log a warning and return.
 */
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
// getAndSet guarantees exactly one caller runs the shutdown sequence.
if (!closed.getAndSet(true)) {
activeClientsCnt.decrementAndGet();
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
// Null check required: enableThroughputControlGroup() flips throughputControlEnabled
// via compareAndSet BEFORE assigning throughputControlStore, so a concurrent close()
// can observe the flag as true while the store reference is still null.
if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
// Returns the (stateless) JSON item deserializer shared by this client instance.
@Override
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
/**
 * Enables a throughput control group on this client.
 * <p>
 * The backing {@code ThroughputControlStore} is created lazily on the first call
 * (guarded by a compareAndSet on {@code throughputControlEnabled}) and wired into
 * either the direct store model or the gateway proxy depending on connection mode.
 * The method is {@code synchronized}, so concurrent callers serialize here.
 * NOTE(review): the enabled flag is set before {@code throughputControlStore} is
 * assigned — readers outside this lock (e.g. close()) may briefly observe
 * enabled==true with a null store; verify call sites tolerate that.
 *
 * @param group the throughput control group to register (must not be null)
 * @param throughputQueryMono mono resolving the provisioned throughput for the group
 */
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
// Throughput control hooks into whichever transport actually executes requests.
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
// Delegates proactive connection warm-up / cache initialization to the store model.
@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
// Returns the account's default consistency level as reported by the gateway
// configuration reader (populated during client initialization).
@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * <p>
 * In DIRECT mode the injector is wired into both the direct store model and the
 * address resolver; the gateway proxy is configured in all modes because metadata
 * requests always flow through it.
 *
 * @param injectorProvider the fault injector provider (must not be null).
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
// Telemetry hook: forwards the "proactive init completed" signal to the store model.
@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
// Telemetry hook: forwards the "proactive init started" signal to the store model.
@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
// Returns the master key or resource token this client was constructed with.
@Override
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
/**
 * Builds a parameterized query spec that scans a single logical partition:
 * {@code SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue}.
 *
 * @param partitionKey the logical partition key whose value is bound to {@code @pkValue}
 * @param partitionKeySelector property-path selector (e.g. {@code ["pk"]}) appended to the alias
 * @return the query spec with the partition key value as a bound parameter
 */
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {

    final String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);

    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));

    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
/**
 * Resolves the physical feed ranges (one per partition key range) for a container.
 * The call is wrapped with an InvalidPartitionException retry policy so that a
 * stale collection cache entry triggers a refresh-and-retry instead of failing.
 *
 * @param collectionLink link of the target collection
 * @return mono emitting the list of feed ranges
 */
@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
// The request is a synthetic Query/Document request solely used for cache resolution.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection from the cache, then maps every overlapping partition key
// range (over the full key space) to a FeedRange. Throws IllegalArgumentException
// for an empty collectionLink; emits IllegalStateException if the cache yields no
// collection.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// forceRefresh=true: always fetch the current partition key range topology.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
// Converts resolved partition key ranges into feed ranges. A null list means the
// name cache is stale: flag a refresh on the request and throw so the surrounding
// InvalidPartitionException retry policy re-resolves and retries.
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
if (partitionKeyRangeList == null) {
request.forceNameCacheRefresh = true;
throw new InvalidPartitionException();
}
List<FeedRange> feedRanges = new ArrayList<>();
partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
return feedRanges;
}
// Wraps a single partition key range's EPK range as an EPK-based feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    return randomUuid(random.nextLong(), random.nextLong());
}
/**
 * Stamps RFC 4122 "version 4, IETF variant" bits onto the given 128 random bits.
 *
 * @param msb candidate most-significant 64 bits (version nibble is overwritten with 0b0100)
 * @param lsb candidate least-significant 64 bits (top two bits are overwritten with 0b10)
 * @return the resulting version-4 {@link UUID}
 */
static UUID randomUuid(long msb, long lsb) {
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: runs the availability-strategy wrapper using this client
// itself as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
/**
 * Wraps a document point operation with the threshold-based availability strategy
 * (request hedging): the operation is started in the primary applicable region and,
 * after staggered delays, speculatively re-issued in the remaining applicable regions.
 * The first non-transient result (success or non-retriable error) wins.
 * <p>
 * When fewer than two regions are applicable the callback is invoked directly with
 * no hedging overhead.
 *
 * @param resourceType must be {@code ResourceType.Document}
 * @param operationType the point operation type
 * @param callback the actual operation to execute per region attempt
 * @param initialRequestOptions caller request options (may be null)
 * @param idempotentWriteRetriesEnabled whether write hedging is permitted
 * @param innerDiagnosticsFactory diagnostics factory to scope per-attempt diagnostics
 * @return mono emitting the winning response or error
 */
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two candidate regions: hedging cannot help, run the plain operation.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
// Scoped factory collects all per-attempt diagnostics so they can be merged into
// the winning result's diagnostics context at the end.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First attempt: allowed to fail over across all regions on its own; any
// CosmosException (transient or not) terminates it as a candidate result.
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: pinned to this region by excluding all other applicable
// regions; only non-transient errors count as a final result.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger: threshold + (attemptIndex - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// firstWithValue: first mono to emit a value wins; losers are cancelled.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when every source errored;
// surface the first CosmosException found among the composed causes.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the (reactive-composite-unwrapped) throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// True when the throwable unwraps to a CosmosException whose status/sub-status
// combination is classified as non-transient for hedging purposes (i.e. should
// terminate the hedged race instead of letting another region attempt continue).
private static boolean isNonTransientCosmosException(Throwable t) {
final Throwable unwrappedException = Exceptions.unwrap(t);
if (!(unwrappedException instanceof CosmosException)) {
return false;
}
CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
return isNonTransientResultForHedging(
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode());
}
/**
 * Builds the exclusion list for a hedged attempt pinned to {@code currentRegion}:
 * the caller's original exclusions plus every other applicable region, so the
 * attempt cannot fail over away from its assigned region.
 *
 * @param initialExcludedRegions caller-provided exclusions (may be null)
 * @param applicableRegions all regions participating in the hedged race
 * @param currentRegion the region this attempt is pinned to
 * @return a fresh mutable list of regions to exclude
 */
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {

    List<String> effectiveExcludedRegions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();

    for (String candidateRegion : applicableRegions) {
        if (!candidateRegion.equals(currentRegion)) {
            effectiveExcludedRegions.add(candidateRegion);
        }
    }

    return effectiveExcludedRegions;
}
/**
 * Classifies a status/sub-status pair as a non-transient (final) result for
 * hedging: successes, client-side operation timeouts, well-known client errors
 * (400/401/405/409/412/413), and true 404 (sub-status UNKNOWN) all end the race;
 * everything else is considered transient.
 */
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Any non-error response is final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }

    // Client-side operation timeout is treated as final for this attempt.
    boolean isClientOperationTimeout =
        statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
            && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;

    // Deterministic client errors that no other region would answer differently.
    boolean isDeterministicClientError =
        statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;

    // Genuine not-found (as opposed to 404 with a retriable sub-status).
    boolean isTrueNotFound =
        statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;

    return isClientOperationTimeout || isDeterministicClientError || isTrueNotFound;
}
// Returns the override diagnostics context when provided, otherwise this client.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any.
 *
 * @param operationType the operation type (read-only vs. write determines which endpoint list applies)
 * @param excludedRegions regions to exclude from the candidate endpoints (may be null)
 * @return the applicable endpoints ordered by preference list if any; empty for
 *         operation types that are neither read-only nor write
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
// Strips null entries from the given endpoint list in place and returns it;
// a null input yields the shared immutable empty list.
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }

    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload: extracts the excluded-regions list from the request options
// and delegates to the main implementation.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
/**
 * Determines the ordered list of regions eligible for speculative (hedged) execution.
 * Returns an empty list — disabling hedging — unless all of the following hold:
 * the end-to-end policy is enabled, the resource is a document, writes (if any) are
 * idempotent-retry-enabled AND multi-write is available, and the configured
 * availability strategy is threshold-based. Excluded regions are filtered out
 * case-insensitively.
 *
 * @return ordered region names to hedge across (possibly empty)
 */
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Normalize exclusions to lower-case for case-insensitive comparison below.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
// NOTE(review): assumes getRegionName never returns null for an applicable endpoint —
// a null here would NPE on toLowerCase; confirm with GlobalEndpointManager's contract.
endpoints.forEach(uri -> {
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
/**
 * Wraps a feed/query operation with the threshold-based availability strategy
 * (request hedging), mirroring the point-operation wrapper: the operation starts
 * in the primary applicable region and is speculatively re-issued in the remaining
 * regions after staggered delays; the first non-transient result wins.
 * Feed operations are always treated as reads (idempotent-write flag is false).
 *
 * @param <T> feed operation result type
 * @param resourceType must be {@code ResourceType.Document} (asserted)
 * @param operationType the feed/query operation type
 * @param retryPolicyFactory factory producing a retry policy per attempt
 * @param req the request template; cloned per regional attempt
 * @param feedOperation the actual operation to execute per attempt
 * @return mono emitting the winning response or error
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Fewer than two candidate regions: run the plain operation without hedging.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// First attempt: may fail over across regions; any CosmosException ends it
// as a candidate result.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: pinned to this region by excluding the other applicable
// regions; only non-transient errors count as final.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger: threshold + (attemptIndex - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// firstWithValue: the first mono to emit wins; the others are cancelled.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when all sources errored;
// surface the first CosmosException found among the composed causes.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
// Callback shape for a single document point operation attempt, parameterized by the
// (possibly cloned) request options and the diagnostics context to record into.
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
// Either-style holder for the outcome of one hedged point-operation attempt:
// exactly one of {response, exception} is non-null. Fields are also read directly
// (not only via getters) by the hedging pipeline.
private static class NonTransientPointOperationResult {
private final ResourceResponse<Document> response;
private final CosmosException exception;
// Error outcome: a non-transient CosmosException that should end the hedged race.
public NonTransientPointOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
// Success outcome.
public NonTransientPointOperationResult(ResourceResponse<Document> response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public ResourceResponse<Document> getResponse() {
return this.response;
}
}
// Generic counterpart of NonTransientPointOperationResult for feed/query operations:
// exactly one of {response, exception} is non-null. Fields are also read directly
// by the hedging pipeline.
private static class NonTransientFeedOperationResult<T> {
private final T response;
private final CosmosException exception;
// Error outcome: a non-transient CosmosException that should end the hedged race.
public NonTransientFeedOperationResult(CosmosException exception) {
checkNotNull(exception, "Argument 'exception' must not be null.");
this.exception = exception;
this.response = null;
}
// Success outcome.
public NonTransientFeedOperationResult(T response) {
checkNotNull(response, "Argument 'response' must not be null.");
this.exception = null;
this.response = response;
}
public boolean isError() {
return this.exception != null;
}
public CosmosException getException() {
return this.exception;
}
public T getResponse() {
return this.response;
}
}
// DiagnosticsClientContext decorator that records every CosmosDiagnostics instance it
// creates so that, once the hedged operation settles, all attempts' diagnostics can be
// merged into a single diagnostics context (merge is one-shot, guarded by isMerged).
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
private final AtomicBoolean isMerged = new AtomicBoolean(false);
private final DiagnosticsClientContext inner;
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
private final boolean shouldCaptureAllFeedDiagnostics;
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
// Delegates creation to the wrapped context but remembers the instance for merging.
@Override
public CosmosDiagnostics createDiagnostics() {
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
// Merges using the diagnostics context snapshot from the request options when present.
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
// One-shot merge: attaches every recorded, non-empty, not-yet-attached diagnostics
// instance to the target context. Subsequent calls are no-ops.
public void merge(CosmosDiagnosticsContext knownCtx) {
if (!isMerged.compareAndSet(false, true)) {
return;
}
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
// No explicit target: use the first recorded diagnostics that already has a context.
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
if (ctx == null) {
return;
}
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
// For feed operations, mark the diagnostics as captured so the paged flux
// does not emit them a second time.
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
// Clears recorded diagnostics and re-arms the factory for another merge cycle.
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
} |
Upgrades should always be ignored | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null | public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
try {
this.httpClientInterceptor = httpClientInterceptor;
if (httpClientInterceptor != null) {
this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
}
this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
this.consistencyLevel,
this.queryCompatibilityMode,
this.userAgentContainer,
this.globalEndpointManager,
this.reactorHttpClient,
this.apiType);
this.globalEndpointManager.init();
this.initializeGatewayConfigurationReader();
if (metadataCachesSnapshot != null) {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy,
metadataCachesSnapshot.getCollectionInfoByNameCache(),
metadataCachesSnapshot.getCollectionInfoByIdCache()
);
} else {
this.collectionCache = new RxClientCollectionCache(this,
this.sessionContainer,
this.gatewayProxy,
this,
this.retryPolicy);
}
this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);
this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this,
collectionCache);
updateGatewayProxy();
clientTelemetry = new ClientTelemetry(
this,
null,
randomUuid().toString(),
ManagementFactory.getRuntimeMXBean().getName(),
connectionPolicy.getConnectionMode(),
globalEndpointManager.getLatestDatabaseAccount().getId(),
null,
null,
this.configs,
this.clientTelemetryConfig,
this,
this.connectionPolicy.getPreferredRegions());
clientTelemetry.init();
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
this.storeModel = this.gatewayProxy;
} else {
this.initializeDirectConnectivity();
}
this.retryPolicy.setRxCollectionCache(this.collectionCache);
ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null
? consistencyLevel
: this.getDefaultConsistencyLevelOfAccount();
boolean updatedDisableSessionCapturing =
(ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
} catch (Exception e) {
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
// ---- Bridge accessors, resolved once per class load ----
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
// Process-wide pseudo machine id reported through diagnostics.
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
// JVM-wide bookkeeping shared by all client instances (used in diagnostics output).
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
// Effective-partition-key range [min, max) spanning every physical partition.
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// ---- Authentication and endpoint configuration ----
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// Mutable: may be synthesized from the master key in the core constructor.
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
// ---- Session, caches, routing and transport state ----
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// Resource id/full-name -> (partition key, resource token) pairs from the permission feed.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor overload without a {@link TokenCredential}: delegates to the
 * permission-feed constructor passing a null token credential, then records the
 * caller-supplied {@link CosmosAuthorizationTokenResolver}.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null, // no TokenCredential in this overload
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor overload that accepts both an {@link AzureKeyCredential} and a
 * {@link TokenCredential}: delegates to the permission-feed constructor and then records
 * the caller-supplied {@link CosmosAuthorizationTokenResolver}.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Permission-feed constructor: delegates to the core constructor and then builds the
 * resource-token map from the supplied permissions. Each permission's resource link is
 * parsed to a resource id / full name, under which its (partition key, token) pair is
 * stored. The first resource token in the feed is remembered as the default token.
 *
 * @throws IllegalArgumentException when a permission's resource link is empty/unparsable
 *         or when no usable tokens could be extracted from a non-empty feed.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);
    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                    Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // Several permissions may target the same resource; group their tokens together.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first resource token as the fallback/default authorization token.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Core constructor: wires authentication, connection policy, session container, global
 * endpoint manager, retry policy and diagnostics configuration for this client instance.
 * On any runtime failure during initialization the client closes itself and rethrows.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Register this instance in the JVM-wide client bookkeeping used by diagnostics.
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
// Fall back to a zero-padded client id when no correlation id was supplied.
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Resolve the authorization mechanism in priority order:
// explicit AzureKeyCredential > resource token > master key > AAD token credential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
// Wrap the raw master key into a credential so one code path signs requests.
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
// Default to Direct-mode connection configuration when none was supplied.
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session tokens are only captured for SESSION consistency unless explicitly overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
logger.error("unexpected failure in initializing client.", e);
close(); // release any partially-initialized resources before propagating
throw e;
}
}
@Override
public DiagnosticsClientConfig getConfig() {
    // Diagnostics configuration assembled during client construction.
    return this.diagnosticsClientConfig;
}
@Override
public CosmosDiagnostics createDiagnostics() {
    // Read the sampling rate from the telemetry config on each call so that
    // runtime configuration changes take effect for newly created diagnostics.
    return diagnosticsAccessor.create(
        this,
        telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
// Creates the gateway configuration reader and derives the effective multi-write setting
// from the latest database account. Must run after globalEndpointManager initialization.
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
// A null account means the initial metadata fetch never succeeded - fail fast.
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
// Multi-write is effective only when both the client opts in and the account supports it.
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
// Pushes the freshly initialized configuration reader and caches into the gateway store model.
private void updateGatewayProxy() {
    RxGatewayStoreModel proxy = this.gatewayProxy;
    proxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    proxy.setCollectionCache(this.collectionCache);
    proxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    proxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
// Captures this client's collection metadata cache into the given snapshot.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, collectionCache);
}
// Wires up direct (TCP) connectivity: the global address resolver, the store client
// factory and finally the server store model. Requires collectionCache and
// partitionKeyRangeCache to be initialized first.
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null, // NOTE(review): purpose of this null argument is not visible here - confirm against GlobalAddressResolver's signature
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
// Adapts this client to the DatabaseAccountManagerInternal interface so the
// GlobalEndpointManager can fetch account metadata and the connection policy
// without holding a direct reference to RxDocumentClientImpl.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
// Factory for the gateway store model; package-private so tests can substitute their own.
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
// Builds the gateway HTTP client from the connection policy; in sharing mode a single
// instance is reused across clients in this JVM, otherwise a dedicated client is created.
private HttpClient httpClient() {
    final HttpClientConfig clientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (!connectionSharingAcrossClientsEnabled) {
        // Dedicated client: record its configuration in the diagnostics config.
        diagnosticsClientConfig.withGatewayHttpClientConfig(clientConfig.toDiagnosticsString());
        return HttpClient.createFixed(clientConfig);
    }
    return SharedGatewayHttpClient.getOrCreateInstance(clientConfig, diagnosticsClientConfig);
}
// Builds the direct-mode store model on top of a StoreClient created by the factory.
// NOTE(review): the subscribeRntbdStatus parameter is not referenced in this body -
// confirm whether it is still needed by callers or can be retired.
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
@Override
public URI getServiceEndpoint() {
    // Immutable service endpoint supplied at construction time.
    return serviceEndpoint;
}
@Override
public ConnectionPolicy getConnectionPolicy() {
    // Effective policy (caller-supplied or the Direct-mode default chosen at construction).
    return connectionPolicy;
}
@Override
public boolean isContentResponseOnWriteEnabled() {
    // Whether write responses carry the resource payload back to the caller.
    return this.contentResponseOnWriteEnabled;
}
@Override
public ConsistencyLevel getConsistencyLevel() {
    // Account/client-level consistency captured at construction.
    return this.consistencyLevel;
}
@Override
public ClientTelemetry getClientTelemetry() {
    // May be null until telemetry is initialized elsewhere in the client lifecycle.
    return clientTelemetry;
}
@Override
public String getClientCorrelationId() {
    // Either caller-supplied or derived from the numeric client id at construction.
    return clientCorrelationId;
}
@Override
public String getMachineId() {
    // The machine id lives on the diagnostics client config; no config means no id.
    DiagnosticsClientConfig config = this.diagnosticsClientConfig;
    return config == null ? null : config.getMachineId();
}
@Override
public String getUserAgent() {
    // Full user-agent string, including any configured suffix.
    return userAgentContainer.getUserAgent();
}
/**
 * Creates a database, executing the request under a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestPolicy),
        requestPolicy);
}
// Builds and issues the Create-database request, recording serialization timings into the
// request's diagnostics. Argument errors surface as an error Mono, not synchronous throws.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Time the JSON serialization of the database resource for diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database by link, executing the request under a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestPolicy),
        requestPolicy);
}
// Builds and issues the Delete-database request. Argument errors surface as an error Mono.
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Reads a database by link, executing the request under a per-operation retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    final DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDatabaseInternal(databaseLink, options, requestPolicy),
        requestPolicy);
}
// Builds and issues the Read-database request. Argument errors surface as an error Mono.
private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Database, path, requestHeaders, options);
// Let the retry policy observe the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    // Databases are a root-scoped feed; delegate to the generic non-document feed reader.
    return this.nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}
/**
 * Maps a parent resource link and a resource type to the feed link queries run against.
 * Root-scoped types (Database, Offer) ignore the parent link; all other supported types
 * append their child path segment to the parent link.
 *
 * @throws IllegalArgumentException for resource types that have no query feed.
 */
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    final String childPathSegment;
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;
        case Offer:
            return Paths.OFFERS_ROOT;
        case DocumentCollection:
            childPathSegment = Paths.COLLECTIONS_PATH_SEGMENT;
            break;
        case Document:
            childPathSegment = Paths.DOCUMENTS_PATH_SEGMENT;
            break;
        case User:
            childPathSegment = Paths.USERS_PATH_SEGMENT;
            break;
        case ClientEncryptionKey:
            childPathSegment = Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT;
            break;
        case Permission:
            childPathSegment = Paths.PERMISSIONS_PATH_SEGMENT;
            break;
        case Attachment:
            childPathSegment = Paths.ATTACHMENTS_PATH_SEGMENT;
            break;
        case StoredProcedure:
            childPathSegment = Paths.STORED_PROCEDURES_PATH_SEGMENT;
            break;
        case Trigger:
            childPathSegment = Paths.TRIGGERS_PATH_SEGMENT;
            break;
        case UserDefinedFunction:
            childPathSegment = Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT;
            break;
        case Conflict:
            childPathSegment = Paths.CONFLICTS_PATH_SEGMENT;
            break;
        default:
            throw new IllegalArgumentException("resource type not supported");
    }
    return Utils.joinPath(parentResourceLink, childPathSegment);
}
/**
 * Extracts the operation context/listener tuple from query request options, or null when
 * no options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    if (options == null) {
        return null;
    }
    // Use the statically cached accessor (qryOptAccessor) instead of re-resolving it
    // through ImplementationBridgeHelpers on every call - consistent with the rest of
    // this class (e.g. applyExceptionToMergedDiagnostics).
    return qryOptAccessor.getOperationContext(options);
}
/**
 * Extracts the operation context/listener tuple from request options, or null when no
 * options were supplied.
 */
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    return options == null ? null : options.getOperationContextAndListenerTuple();
}
/**
 * Convenience overload: runs the query with this client acting as its own
 * diagnostics factory.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    return this.createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}
// Core query entry point: resolves the query feed link, picks/creates a correlation
// activity id, installs a retry policy for stale partition routing (splits/merges), and
// scopes diagnostics so they are merged back into the QueryFeedOperationState on every
// terminal signal (next/error/cancel).
private <T> Flux<FeedResponse<T>> createQuery(
String parentResourceLink,
SqlQuerySpec sqlQuery,
QueryFeedOperationState state,
Class<T> klass,
ResourceType resourceTypeEnum,
DiagnosticsClientContext innerDiagnosticsFactory) {
String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
UUID correlationActivityIdOfRequestOptions = qryOptAccessor
.getCorrelationActivityId(nonNullQueryOptions);
// A caller-provided correlation id wins; otherwise generate a fresh one for this query.
UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
correlationActivityIdOfRequestOptions : randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
// Retries the query when the cached routing information is invalidated by a partition split/merge.
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
state.registerDiagnosticsFactory(
diagnosticsFactory::reset,
diagnosticsFactory::merge);
// Merge collected diagnostics into the operation state on every outcome path.
return
ObservableHelper.fluxInlineIfPossibleAsObs(
() -> createQueryInternal(
diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
invalidPartitionExceptionRetryPolicy
).flatMap(result -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}
// Builds the document-query execution context and turns it into a Flux of feed responses.
// Attaches query info / query-plan diagnostics to responses (plan diagnostics only on the
// first page) and wraps the Flux with the end-to-end timeout policy when one is enabled.
private <T> Flux<FeedResponse<T>> createQueryInternal(
DiagnosticsClientContext diagnosticsClientContext,
String resourceLink,
SqlQuerySpec sqlQuery,
CosmosQueryRequestOptions options,
Class<T> klass,
ResourceType resourceTypeEnum,
IDocumentQueryClient queryClient,
UUID activityId,
final AtomicBoolean isQueryCancelledOnTimeout) {
Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
DocumentQueryExecutionContextFactory
.createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
options, resourceLink, false, activityId,
Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
AtomicBoolean isFirstResponse = new AtomicBoolean(true);
return executionContext.flatMap(iDocumentQueryExecutionContext -> {
// Query info is only available for pipelined execution contexts.
QueryInfo queryInfo = null;
if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
}
QueryInfo finalQueryInfo = queryInfo;
Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
.map(tFeedResponse -> {
if (finalQueryInfo != null) {
if (finalQueryInfo.hasSelectValue()) {
ModelBridgeInternal
.addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
}
// Query-plan diagnostics are attached to the very first page only.
if (isFirstResponse.compareAndSet(true, false)) {
ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
finalQueryInfo.getQueryPlanDiagnosticsContext());
}
}
return tFeedResponse;
});
RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(options);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(requestOptions);
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
}
return feedResponseFlux;
}, Queues.SMALL_BUFFER_SIZE, 1);
}
/**
 * Merges the client-side request statistics of every request cancelled by an end-to-end
 * timeout into a single {@link CosmosDiagnostics} and attaches it to the given exception,
 * so the surfaced timeout carries the full diagnostic picture.
 *
 * @param requestOptions query options carrying the cancelled-request diagnostics tracker.
 * @param exception the timeout/cancellation exception the merged diagnostics are set on.
 */
private static void applyExceptionToMergedDiagnostics(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception) {

    List<CosmosDiagnostics> cancelledRequestDiagnostics =
        qryOptAccessor
            .getCancelledRequestDiagnosticsTracker(requestOptions);

    if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
        CosmosDiagnostics aggregratedCosmosDiagnostics =
            cancelledRequestDiagnostics
                .stream()
                .reduce((first, toBeMerged) -> {
                    ClientSideRequestStatistics clientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(first);

                    // BUGFIX: previously this read the statistics from 'first' a second
                    // time, so 'toBeMerged' was merged as a copy of 'first' and the
                    // statistics of all but the first cancelled request were dropped.
                    ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                        ImplementationBridgeHelpers
                            .CosmosDiagnosticsHelper
                            .getCosmosDiagnosticsAccessor()
                            .getClientSideRequestStatisticsRaw(toBeMerged);

                    if (clientSideRequestStatistics == null) {
                        return toBeMerged;
                    } else {
                        clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                        return first;
                    }
                })
                .get();

        BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
    }
}
// Applies the end-to-end operation timeout to the feed Flux. A Reactor TimeoutException is
// translated into a Cosmos cancellation exception that carries the merged diagnostics of
// the requests in flight when the timeout fired.
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
Flux<FeedResponse<T>> feedResponseFlux,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
CosmosQueryRequestOptions requestOptions,
final AtomicBoolean isQueryCancelledOnTimeout) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
// NOTE(review): a negative duration is still handed to Flux.timeout() below - presumably
// intended to force immediate cancellation; confirm Reactor's behavior for negative
// timeouts before relying on this path outside tests.
if (endToEndTimeout.isNegative()) {
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
cancellationException.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
return cancellationException;
}
return throwable;
});
}
return feedResponseFlux
.timeout(endToEndTimeout)
.onErrorMap(throwable -> {
if (throwable instanceof TimeoutException) {
// Preserve the reactive stack trace on the substituted Cosmos exception.
CosmosException exception = new OperationCancelledException();
exception.setStackTrace(throwable.getStackTrace());
isQueryCancelledOnTimeout.set(true);
applyExceptionToMergedDiagnostics(requestOptions, exception);
return exception;
}
return throwable;
});
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    // Wrap the raw query text in a SqlQuerySpec and reuse the spec-based overload.
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return this.queryDatabases(querySpec, state);
}
@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    // Database queries always execute against the databases root feed.
    return this.createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}
@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    // Each operation gets a fresh session-token-reset retry policy instance.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of collection creation: validates inputs, serializes the collection
 * body (recording serialization diagnostics), issues the Create request, and on success
 * seeds the session container with the returned session token.
 *
 * @param databaseLink link of the parent database; must be non-empty.
 * @param collection the collection definition to create; must not be null.
 * @param options request options used for headers; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the created collection resource response, or an error.
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
collection.getId());
validateResource(collection);
String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
// Measure body serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Record the session token of the new collection for session-consistency reads.
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
});
} catch (Exception e) {
logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    // Each operation gets a fresh session-token-reset retry policy instance.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of collection replacement: validates the collection, serializes its
 * body (recording serialization diagnostics), issues the Replace request, and updates the
 * session container from the response when a resource body is returned.
 *
 * @param collection the collection definition to replace; must not be null.
 * @param options request options used for headers; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the replaced collection resource response, or an error.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (collection == null) {
throw new IllegalArgumentException("collection");
}
logger.debug("Replacing a Collection. id: [{}]", collection.getId());
validateResource(collection);
String path = Utils.joinPath(collection.getSelfLink(), null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
// Measure body serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Unlike create, replace may return no body; only capture the session token if present.
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
.doOnNext(resourceResponse -> {
if (resourceResponse.getResource() != null) {
this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
getAltLink(resourceResponse.getResource()),
resourceResponse.getResponseHeaders());
}
});
} catch (Exception e) {
logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    // Each operation gets a fresh session-token-reset retry policy instance.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of collection deletion: validates the link, builds the Delete
 * request and maps the raw service response to a typed resource response.
 *
 * @param collectionLink link of the collection to delete; must be non-empty.
 * @param options request options used for headers; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the deletion's resource response, or an error.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Issues a DELETE against the store model; headers are populated lazily on subscription. */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(populatedRequest -> {
            // If retries occurred, close out the timing window of the last retry attempt.
            boolean hasRetried = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (hasRetried) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel storeProxy = getStoreProxy(populatedRequest);
            return storeProxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/** Issues a delete-all-items-by-partition-key operation, which travels as a POST. */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            // If retries occurred, close out the timing window of the last retry attempt.
            boolean hasRetried = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (hasRetried) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/** Issues a point-read (GET) against the store model. */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            // If retries occurred, close out the timing window of the last retry attempt.
            boolean hasRetried = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (hasRetried) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/** Issues a feed read (GET); headers, including feed-range headers, are populated on subscription. */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = getStoreProxy(populatedRequest);
            return storeProxy.processMessage(populatedRequest);
        });
}
/** Issues a query (POST) and records the response's session token for session consistency. */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeProxy = this.getStoreProxy(populatedRequest);
            return storeProxy.processMessage(populatedRequest)
                .map(serviceResponse -> {
                    // Keep the session container current so later reads can honor session consistency.
                    this.captureSessionToken(populatedRequest, serviceResponse);
                    return serviceResponse;
                });
        });
}
@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    // Each operation gets a fresh session-token-reset retry policy instance.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core implementation of collection read: validates the link, builds the Read request and
 * maps the raw service response to a typed resource response.
 *
 * @param collectionLink link of the collection to read; must be non-empty.
 * @param options request options used for headers; may be null.
 * @param retryPolicyInstance retry policy notified before the request is sent; may be null.
 * @return a Mono emitting the read collection's resource response, or an error.
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    // Collections are enumerated as a non-document feed under <databaseLink>/colls.
    String collectionsPath = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, collectionsPath);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
QueryFeedOperationState state) {
// Convenience overload: wrap the raw query text in a parameterless SqlQuerySpec.
return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
// Queries collections within the given database via the shared query pipeline.
return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
}
/**
 * Serializes stored-procedure parameters into a JSON array literal, e.g. {@code ["a",1]}.
 * JsonSerializable parameters use their own JSON form; everything else goes through the
 * shared Jackson mapper.
 *
 * @throws IllegalArgumentException if a parameter cannot be serialized to JSON.
 */
private static String serializeProcedureParams(List<Object> objectArray) {
    List<String> jsonFragments = new ArrayList<>(objectArray.size());
    for (Object param : objectArray) {
        if (param instanceof JsonSerializable) {
            jsonFragments.add(ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) param));
        } else {
            try {
                jsonFragments.add(mapper.writeValueAsString(param));
            } catch (IOException e) {
                throw new IllegalArgumentException("Can't serialize the object into the json string", e);
            }
        }
    }
    return "[" + StringUtils.join(jsonFragments, ",") + "]";
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
/**
 * Builds the HTTP request headers for an operation from client defaults and per-request
 * options: consistency level, content-response preference, etags, triggers, session token,
 * throughput/offer settings, quota/script-logging flags and dedicated-gateway options.
 *
 * @param options per-request options; may be null, in which case only client defaults apply.
 * @param resourceType resource type of the operation (affects the PREFER header).
 * @param operationType operation type (affects the PREFER header for writes).
 * @return a mutable header map for the outgoing request.
 */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
Map<String, String> headers = new HashMap<>();
if (this.useMultipleWriteLocations) {
headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
}
if (consistencyLevel != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
}
// Without options, only the client-wide content-response preference can apply.
if (options == null) {
if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
return headers;
}
Map<String, String> customOptions = options.getHeaders();
if (customOptions != null) {
headers.putAll(customOptions);
}
// Per-request content-response setting overrides the client-wide default.
boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
if (options.isContentResponseOnWriteEnabled() != null) {
contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
}
if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
}
if (options.getIfMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
}
if (options.getIfNoneMatchETag() != null) {
headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
}
if (options.getConsistencyLevel() != null) {
headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
}
if (options.getIndexingDirective() != null) {
headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
}
if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
}
if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
}
if (!Strings.isNullOrEmpty(options.getSessionToken())) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
}
if (options.getResourceTokenExpirySeconds() != null) {
headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
String.valueOf(options.getResourceTokenExpirySeconds()));
}
// Explicit offer throughput wins over offer type.
if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
} else if (options.getOfferType() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
}
// ThroughputProperties are only consulted when no explicit offer throughput is set.
if (options.getOfferThroughput() == null) {
if (options.getThroughputProperties() != null) {
Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
if (offerAutoscaleSettings != null) {
autoscaleAutoUpgradeProperties
= offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
}
// Fixed (manual) throughput and autoscale settings are mutually exclusive.
if (offer.hasOfferThroughput() &&
(offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
autoscaleAutoUpgradeProperties != null &&
autoscaleAutoUpgradeProperties
.getAutoscaleThroughputProperties()
.getIncrementPercent() >= 0)) {
throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
+ "fixed offer");
}
if (offer.hasOfferThroughput()) {
headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
} else if (offer.getOfferAutoScaleSettings() != null) {
headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
}
}
}
if (options.isQuotaInfoEnabled()) {
headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
}
if (options.isScriptLoggingEnabled()) {
headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
}
if (options.getDedicatedGatewayRequestOptions() != null) {
if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
}
if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
}
}
return headers;
}
/**
 * Returns the retry-policy factory whose policies reset session tokens on retry.
 */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return this.resetSessionTokenRetryPolicy;
}
/**
 * Resolves the target collection from the collection cache, then applies partition-key
 * information to the request based on that collection's partition-key definition.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Document document,
                                                                  RequestOptions options) {
    Mono<Utils.ValueHolder<DocumentCollection>> resolvedCollection =
        this.collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request);
    return resolvedCollection.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Applies partition-key information to the request once the supplied collection
 * resolution completes, reusing the synchronous overload for the actual header work.
 */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                  ByteBuffer contentAsByteBuffer,
                                                                  Object document,
                                                                  RequestOptions options,
                                                                  Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
    return collectionObs.map(holder -> {
        addPartitionKeyInformation(request, contentAsByteBuffer, document, options, holder.v);
        return request;
    });
}
/**
 * Computes the effective partition key for the request and stamps it on both the request
 * context and the PARTITION_KEY header. Resolution precedence:
 * 1) PartitionKey.NONE from options, 2) an explicit partition key from options,
 * 3) empty key for collections without a partition-key definition, 4) extraction from the
 * document body (typed object or raw bytes).
 *
 * @throws UnsupportedOperationException if no partition key can be determined.
 * @throws IllegalStateException if neither a document body nor content bytes are available.
 */
private void addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object objectDoc, RequestOptions options,
DocumentCollection collection) {
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
PartitionKeyInternal partitionKeyInternal = null;
if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else if (options != null && options.getPartitionKey() != null) {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
} else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
// Migration/legacy path: collection has no partition-key definition.
partitionKeyInternal = PartitionKeyInternal.getEmpty();
} else if (contentAsByteBuffer != null || objectDoc != null) {
InternalObjectNode internalObjectNode;
if (objectDoc instanceof InternalObjectNode) {
internalObjectNode = (InternalObjectNode) objectDoc;
} else if (objectDoc instanceof ObjectNode) {
internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
} else if (contentAsByteBuffer != null) {
// Rewind: the buffer may already have been consumed by serialization.
contentAsByteBuffer.rewind();
internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
} else {
throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
}
// Measure partition-key extraction so it shows up in serialization diagnostics.
Instant serializationStartTime = Instant.now();
partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTime,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
);
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
} else {
throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}
/**
 * Builds the RxDocumentServiceRequest for a document write (create/upsert/replace-style
 * operations routed through this path): serializes the document body with an optional
 * tracking id, records serialization diagnostics, applies write-retry and region-exclusion
 * settings, then resolves the collection to attach partition-key information.
 *
 * @param requestRetryPolicy retry policy notified before the request is sent; may be null.
 * @param documentCollectionLink link of the target collection; must be non-empty.
 * @param document the document payload; must not be null.
 * @param options request options; may be null.
 * @param disableAutomaticIdGeneration NOTE(review): not referenced in this method body —
 *        id generation appears to be handled elsewhere; confirm before relying on it here.
 * @param operationType the write operation type this request represents.
 * @param clientContextOverride diagnostics client context override, if any.
 * @return a Mono emitting the request once partition-key information has been applied.
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Measure body serialization so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Opt-in: allow retrying writes that are not naturally idempotent.
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the RxDocumentServiceRequest for a transactional batch: wraps the pre-serialized
 * batch body, records serialization diagnostics, applies region-exclusion settings, then
 * resolves the target collection so batch routing headers (partition key or partition-key
 * range id) can be attached.
 *
 * Fix: the exclude-regions assignment was previously performed twice on the same request;
 * the redundant second assignment has been removed (it reapplied an identical value).
 *
 * @param requestRetryPolicy retry policy notified before the request is sent; may be null.
 * @param documentCollectionLink link of the target collection; must be non-empty.
 * @param serverBatchRequest the pre-serialized batch payload; must not be null.
 * @param options request options used for headers and region exclusion; may be null.
 * @param disableAutomaticIdGeneration not referenced here; batch item ids are expected to
 *        be set by the caller.
 * @return a Mono emitting the fully-prepared batch request once the collection resolves.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
    // Measure body materialization so it shows up in request diagnostics.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }
    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }
    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Attaches batch routing and batch-semantics headers to the request. Single-partition-key
 * batches are routed by partition key (with NONE resolved against the collection's
 * definition); range-scoped batches are routed by partition-key range id.
 *
 * @throws UnsupportedOperationException for unknown ServerBatchRequest subtypes.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// NONE must be translated using the collection's partition-key definition.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
// Batch semantics: atomicity and continue-on-error are carried as headers.
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
 * Populates date, authorization, API-type, capabilities, content-type and accept headers
 * on the request, and — for document/conflict feed operations with a feed range — also the
 * feed-range filtering headers (which require async collection/PK-range resolution).
 *
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key/resource-token/token-resolver/credential auth is computed synchronously here;
// AAD-token auth is handled later in populateAuthorizationHeader.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Default content types; PATCH uses the JSON-patch media type.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
/** Advertises the SDK's supported capabilities unless the caller already set the header. */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (!headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        headers.put(
            HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
            HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}
/**
 * Feed-range filtering headers only apply to document/conflict feed reads and queries
 * that actually carry a feed range.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    if (resourceType != ResourceType.Document && resourceType != ResourceType.Conflict) {
        return false;
    }
    OperationType operationType = request.getOperationType();
    boolean isFeedOrQuery = operationType == OperationType.ReadFeed
        || operationType == OperationType.Query
        || operationType == OperationType.SqlQuery;
    return isFeedOrQuery && request.getFeedRange() != null;
}
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    // Only AAD-token auth requires an async token fetch; other auth types are already applied.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    // Only AAD-token auth requires an async token fetch; other auth types are already applied.
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
// Reports which auth mechanism this client instance was configured with.
return this.authorizationTokenType;
}
/**
 * Computes the authorization token for a request. Precedence: custom token resolver,
 * then key credential (signed), then a single resource token used as-is, and finally the
 * resource-token map (with the first permission-feed token for DatabaseAccount reads).
 */
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token is used verbatim; no signing is required.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
// Maps a wire-level ResourceType onto the public CosmosResourceType;
// anything without a mapping falls back to SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
// Persists the session token from the service response into the session container,
// keyed off the request, so later session-consistent requests can replay it.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Executes a create (POST) after populating the common request headers; stamps
 * the retry-context end time on retried attempts so diagnostics capture the
 * retry window.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel proxy = this.getStoreProxy(populatedRequest);
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return proxy.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Executes an upsert: POSTs the request with the IS_UPSERT header set, then
 * captures the returned session token for session consistency.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> requestHeaders = populatedRequest.getHeaders();
            assert (requestHeaders != null);
            // Upserts share the create wire path; this header flips the semantics server-side.
            requestHeaders.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(storeResponse -> {
                    this.captureSessionToken(populatedRequest, storeResponse);
                    return storeResponse;
                });
        });
}
/**
 * Executes a replace (PUT) after populating the common request headers; stamps
 * the retry-context end time on retried attempts for diagnostics.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Executes a PATCH after populating the common request headers; stamps the
 * retry-context end time on retried attempts for diagnostics.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Creates a document, routed through the availability strategy wrapper.
 * Non-idempotent write retries are honored only when enabled on the options.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {
    boolean retryNonIdempotentWrites =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (effectiveOptions, e2eConfig, clientContextOverride) -> createDocumentCore(
            collectionLink,
            document,
            effectiveOptions,
            disableAutomaticIdGeneration,
            e2eConfig,
            clientContextOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Chooses the retry policy for a create — adding partition-key-mismatch retries
 * when no partition key was supplied — and runs the create through it.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    // Without an explicit partition key, wrap with a policy that refreshes the
    // collection cache and retries on partition-key mismatch.
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Builds the create-document request (headers, serialized body, partition key)
 * and executes it through the store proxy, enforcing the end-to-end timeout
 * policy when one is configured. Synchronous setup failures are surfaced as a
 * Mono error so callers see a uniform reactive contract.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy requestRetryPolicy,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
// Request construction is itself async (collection / partition-key resolution).
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options));
// The E2E timeout wraps only the service call, not request preparation.
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Wraps the given response Mono with the end-to-end operation timeout when the
 * policy is enabled. A negative configured timeout fails immediately; otherwise
 * a reactor timeout is applied and timeouts are mapped onto a cancellation
 * exception carrying the request diagnostics.
 */
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
    RxDocumentServiceRequest request,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono) {
    if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
        // No policy, or disabled: pass the pipeline through untouched.
        return rxDocumentServiceResponseMono;
    }
    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
    if (endToEndTimeout.isNegative()) {
        return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
    }
    request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
    return rxDocumentServiceResponseMono
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> getCancellationException(request, throwable));
}
/**
 * Converts a reactor timeout into an OperationCancelledException that carries
 * the request's diagnostics and original stack trace; any other throwable —
 * or a timeout without a request context — is returned unchanged.
 */
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
    Throwable unwrapped = reactor.core.Exceptions.unwrap(throwable);
    if (!(unwrapped instanceof TimeoutException)) {
        return throwable;
    }
    if (request.requestContext == null) {
        return throwable;
    }
    CosmosException cancellation = new OperationCancelledException();
    cancellation.setStackTrace(throwable.getStackTrace());
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
/**
 * Builds the cancellation exception used when a caller configured a negative
 * end-to-end timeout. Attaches the NEGATIVE_TIMEOUT_PROVIDED sub-status and,
 * when a request context is available, the request's diagnostics.
 */
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
    CosmosException cancellation = new OperationCancelledException(message, null);
    BridgeInternal.setSubStatusCode(cancellation, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    boolean hasRequestContext = request != null && request.requestContext != null;
    if (!hasRequestContext) {
        return cancellation;
    }
    request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
    return BridgeInternal.setCosmosDiagnostics(cancellation, request.requestContext.cosmosDiagnostics);
}
/**
 * Upserts a document, routed through the availability strategy wrapper.
 * Non-idempotent write retries are honored only when enabled on the options.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    boolean retryNonIdempotentWrites =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (effectiveOptions, e2eConfig, clientContextOverride) -> upsertDocumentCore(
            collectionLink, document, effectiveOptions, disableAutomaticIdGeneration, e2eConfig, clientContextOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Chooses the retry policy for an upsert — adding partition-key-mismatch
 * retries when no partition key was supplied — and runs the upsert through it.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        retryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, retryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertDocumentInternal(
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Builds the upsert request (reusing the create-request builder with
 * OperationType.Upsert) and executes it through the store proxy under the
 * end-to-end timeout policy.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
// Upserts share the create-request builder; only the OperationType differs.
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
// The E2E timeout wraps only the service call, not request preparation.
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Replaces a document identified by its link, routed through the availability
 * strategy wrapper. Non-idempotent write retries are honored only when enabled
 * on the options.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    boolean retryNonIdempotentWrites =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContextOverride) -> replaceDocumentCore(
            documentLink,
            document,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Chooses the retry policy for a replace-by-link — adding partition-key-mismatch
 * retries when no partition key was supplied — and runs the replace through it.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // Derive the collection link from the document link for cache-refresh retries.
        String collectionLink = Utils.getCollectionName(documentLink);
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            documentLink,
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Validates the inputs, converts the raw payload into a typed Document, and
 * delegates to the typed replace overload.
 *
 * Fix: the exception is now passed as the trailing logger argument so the
 * stack trace is captured (consistent with createDocumentInternal);
 * message-only logging silently dropped it.
 *
 * @throws IllegalArgumentException (as a Mono error) when documentLink is
 *         empty or document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a document using its own self link, routed through the availability
 * strategy wrapper. Non-idempotent write retries are honored only when enabled
 * on the options.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    boolean retryNonIdempotentWrites =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, e2eConfig, clientContextOverride) -> replaceDocumentCore(
            document,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Chooses the retry policy for a replace keyed off the document instance —
 * adding partition-key-mismatch retries when no partition key was supplied —
 * and runs the replace through it.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        // NOTE(review): the document's self link is passed where the by-link overload
        // passes a collection link — confirm this is intentional for the retry policy.
        String collectionLink = document.getSelfLink();
        retryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, retryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy effectiveRetryPolicy = retryPolicy;
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            effectiveRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        effectiveRetryPolicy);
}
/**
 * Validates the document and delegates to the link-based replace using the
 * document's self link.
 *
 * Fixes: the failure log message said "replacing a database" for a document
 * replace (copy/paste from the database code path), and the exception was
 * logged message-only, dropping the stack trace.
 *
 * @throws IllegalArgumentException (as a Mono error) when document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core replace implementation: serializes the (optionally tracking-id-stamped)
 * document, builds the PUT request, resolves the partition key, and executes it
 * under the end-to-end timeout policy.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
// Serialization is timed so it can be surfaced in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
if (options != null) {
String trackingId = options.getTrackingId();
// The tracking id is written into the document body before serialization.
if (trackingId != null && !trackingId.isEmpty()) {
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
// Opt-in only: non-idempotent write retries must be explicitly enabled by the caller.
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
return requestObs.flatMap(req -> {
// NOTE(review): the outer 'request' is used rather than the lambda's 'req';
// presumably addPartitionKeyInformation mutates and returns the same instance — confirm.
Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
});
}
// Per-request end-to-end latency policy wins over the client-level default.
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    if (options != null && options.getCosmosEndToEndLatencyPolicyConfig() != null) {
        return options.getCosmosEndToEndLatencyPolicyConfig();
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Applies partial-document (patch) operations, routed through the availability
 * strategy wrapper. Non-idempotent write retries are honored only when enabled
 * on the options.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    boolean retryNonIdempotentWrites =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (effectiveOptions, e2eConfig, clientContextOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Runs the patch under a session-token-reset retry policy.
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> patchDocumentInternal(
            documentLink,
            cosmosPatchOperations,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Builds and executes a PATCH request for the given document: serializes the
 * patch operations, resolves the partition key, and applies the end-to-end
 * timeout policy around the service call.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
// Serialization of the patch payload is timed so it can be surfaced in diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
// Opt-in only: non-idempotent write retries must be explicitly enabled by the caller.
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// Content and document are passed as null here; the partition key is resolved from options/headers.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
// NOTE(review): the outer 'request' is used rather than the lambda's 'req';
// presumably addPartitionKeyInformation mutates and returns the same instance — confirm.
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes a document by link with no item snapshot (null is passed), routed
 * through the availability strategy wrapper. Non-idempotent write retries are
 * honored only when enabled on the options.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    boolean retryNonIdempotentWrites =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            null,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Deletes a document by link, supplying an item snapshot that can be used for
 * partition-key resolution; routed through the availability strategy wrapper.
 * Non-idempotent write retries are honored only when enabled on the options.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    boolean retryNonIdempotentWrites =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, clientContextOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            effectiveOptions,
            e2eConfig,
            clientContextOverride),
        options,
        retryNonIdempotentWrites
    );
}
/**
 * Runs the delete under a session-token-reset retry policy.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Builds and executes a delete-document request, resolving the partition key
 * (optionally from the supplied item snapshot) and applying the end-to-end
 * timeout policy around the service call.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
String documentLink,
InternalObjectNode internalObjectNode,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
if (StringUtils.isEmpty(documentLink)) {
throw new IllegalArgumentException("documentLink");
}
logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
String path = Utils.joinPath(documentLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
// Opt-in only: non-idempotent write retries must be explicitly enabled by the caller.
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
// The item snapshot (when non-null) is used for partition-key resolution.
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request, null, internalObjectNode, options, collectionObs);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(req -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = this
.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
// The E2E timeout wraps only the service call, not request preparation.
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
// NOTE(review): message-only logging loses the stack trace; createDocumentInternal passes 'e'.
logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
return Mono.error(e);
}
}
/**
 * Deletes every document in a logical partition, running under a fresh
 * session-token-reset retry policy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and executes a partition-wide delete (ResourceType.PartitionKey).
 * NOTE(review): the public overload's PartitionKey parameter is not forwarded
 * here directly; presumably it travels via options/request headers — confirm.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
return requestObs.flatMap(req -> this
.deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
} catch (Exception e) {
// NOTE(review): message-only logging loses the stack trace; createDocumentInternal passes 'e'.
logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
// Delegate to the internal overload using this client as the diagnostics factory.
return readDocument(documentLink, options, this);
}
/**
 * Internal read overload allowing the caller to supply the diagnostics factory
 * (used by availability-strategy wrappers). The non-idempotent-write-retries
 * flag is always false here: this is a read.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (effectiveOptions, e2eConfig, clientContextOverride) ->
            readDocumentCore(documentLink, effectiveOptions, e2eConfig, clientContextOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
/**
 * Runs the read under a session-token-reset retry policy.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
/**
 * Builds and executes a point-read for a document, resolving the partition key
 * and applying the end-to-end timeout policy around the service call.
 *
 * Fix: guard the exclude-regions propagation with a null check on options —
 * every sibling operation (create/replace/patch/delete) guards this call and
 * options is nullable throughout this API, so the unguarded call could NPE.
 * Also logs the exception object so the stack trace is retained.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // NPE fix: options may legitimately be null for reads.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            // The E2E timeout wraps only the service call, not request preparation.
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the full document feed of a collection, implemented as a
 * {@code SELECT * FROM r} query so paging and diagnostics share the query path.
 *
 * @throws IllegalArgumentException when collectionLink is empty
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String readAllQuery = "SELECT * FROM r";
    return queryDocuments(collectionLink, readAllQuery, state, classOfT);
}
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
itemIdentityList
.forEach(itemIdentity -> {
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
/**
 * Builds the per-partition query used by readMany for every physical partition
 * key range that owns more than one requested item. Ranges that hold exactly one
 * item are intentionally skipped here: those are served via point reads instead
 * (see pointReadsForReadMany), so no query spec is produced for them.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    final String pkSelector = createPkSelector(partitionKeyDefinition);
    final Map<PartitionKeyRange, SqlQuerySpec> queriesByRange = new HashMap<>();

    for (Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> rangeAndItems
             : partitionRangeItemKeyMap.entrySet()) {

        List<CosmosItemIdentity> itemsInRange = rangeAndItems.getValue();
        if (itemsInRange.size() <= 1) {
            // single item in this range -> handled by a point read, no query needed
            continue;
        }

        final SqlQuerySpec querySpec;
        if ("[\"id\"]".equals(pkSelector)) {
            // partition key path is /id - id equality alone identifies each item
            querySpec = createReadManyQuerySpecPartitionKeyIdSame(itemsInRange, pkSelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            querySpec = createReadManyQuerySpecMultiHash(itemsInRange, partitionKeyDefinition);
        } else {
            querySpec = createReadManyQuerySpec(itemsInRange, pkSelector);
        }
        queriesByRange.put(rangeAndItems.getKey(), querySpec);
    }

    return queriesByRange;
}
/**
 * Builds the readMany query for the special case where the partition key path is
 * "/id": a simple "c.id IN ( @param0, @param1, ... )" predicate is sufficient
 * because the id doubles as the partition key.
 *
 * Note: partitionKeySelector is accepted for signature parity with the other
 * query-spec builders but is not needed for this shape of query.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");

    for (int index = 0; index < idPartitionKeyPairList.size(); index++) {
        String paramName = "@param" + index;
        parameters.add(new SqlParameter(paramName, idPartitionKeyPairList.get(index).getId()));

        // separator-before style: prepend ", " for every parameter after the first
        if (index > 0) {
            query.append(", ");
        }
        query.append(paramName);
    }

    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the readMany query for a single-path (non-multi-hash) partition key:
 * one "(c.id = @idParam AND c[pk] = @pkParam)" disjunct per requested item,
 * OR-ed together. Parameters are numbered @param0, @param1, ... with the
 * partition key parameter of each item preceding its id parameter.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    List<SqlParameter> parameters = new ArrayList<>();
    StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");

    int paramIndex = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity identity = itemIdentities.get(i);

        // partition key parameter first (even index), id parameter second (odd index)
        String pkParamName = "@param" + paramIndex++;
        parameters.add(new SqlParameter(
            pkParamName,
            ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));

        String idParamName = "@param" + paramIndex++;
        parameters.add(new SqlParameter(idParamName, identity.getId()));

        // OR-prefix style: join disjuncts by prepending " OR " after the first
        if (i > 0) {
            query.append(" OR ");
        }
        query.append("(")
             .append("c.id = ")
             .append(idParamName)
             .append(" AND ")
             .append(" c")
             .append(partitionKeySelector)
             .append(" = ")
             .append(pkParamName)
             .append(" )");
    }

    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the readMany query for a container with a MultiHash (hierarchical)
 * partition key: one "(c.id = @x AND c.path1 = @y AND c.path2 = @z ...)"
 * disjunct per requested item, OR-ed together. Parameter names are numbered
 * sequentially (@param0, @param1, ...) across all items.
 *
 * NOTE(review): the partition key object is cast to String and its components
 * are recovered by splitting on '=' - assumes that is how multi-hash PKs render
 * here (TODO confirm); a component value that itself contains '=' would be split
 * incorrectly, and more components than defined paths would make
 * paths.get(pathCount) throw.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    // global parameter counter across all items and sub-components
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);
        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkValueString = (String) pkValue;
        // pairs of (partition key path, parameter name), one per sub-partition key component
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;
        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }
        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            // substring(1) drops the leading '/' of the partition key path
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }
        queryStringBuilder.append(" )");
        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
/**
 * Converts the partition key definition's paths into a bracketed property
 * selector to be appended to "c", e.g. path "/pk" becomes ["pk"] and a
 * hierarchical key /a, /b becomes ["a"]["b"].
 *
 * NOTE(review): a double quote inside a path segment is replaced with a single
 * backslash rather than an escaped quote (\") - looks suspicious; confirm the
 * intended behavior for paths that legitimately contain '"'.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1)) // drop the leading '/'
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}
/**
 * Runs the query-based half of a readMany call: one SQL query per partition key
 * range that owns more than one requested item. Ranges with a single item are
 * served by point reads (see pointReadsForReadMany) and never appear in
 * rangeQueryMap.
 *
 * @param diagnosticsFactory scoped factory collecting diagnostics for this operation
 * @param parentResourceLink link of the parent (collection) resource being queried
 * @param sqlQuery           placeholder query; the effective per-range queries come from rangeQueryMap
 * @param options            query request options forwarded to the execution context
 * @param klass              deserialization target type
 * @param resourceTypeEnum   resource type being queried (Document for readMany)
 * @param collection         resolved collection metadata
 * @param rangeQueryMap      per-partition-key-range query specs
 * @return a Flux of feed responses; empty when no range requires a query
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    if (rangeQueryMap.isEmpty()) {
        // nothing to query - every requested item is covered by a point read
        return Flux.empty();
    }
    UUID activityId = randomUuid();
    // flipped by the execution context if the query gets cancelled on timeout
    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);
    return executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
/**
 * Runs the point-read half of a readMany call: every partition key range that
 * owns exactly one requested item is served with a single document read instead
 * of a query. Each read result - or a tolerated "document not found" - is
 * re-packaged as a one-page FeedResponse so it can be merged with the query
 * results from queryForReadMany.
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {
    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                // translate the query options into point-read request options
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                // assumes resourceLink already ends with a path separator -- TODO confirm
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            // a plain 404 just means the item doesn't exist - surface it as
                            // an empty page instead of failing the whole readMany call
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            // ranges with more than one item are handled by queryForReadMany
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;
            if (cosmosException != null) {
                // tolerated 404: empty page, keeping the headers/diagnostics of the failed read
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                // successful read: wrap the single item as a one-element feed page
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }
            return Mono.just(feedResponse);
        });
}
/**
 * Queries documents using raw query text: wraps the text in a parameter-less
 * SqlQuerySpec and reuses the spec-based overload.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
/**
 * Creates an IDocumentQueryClient facade over this client for the query
 * pipeline. Most members delegate straight to the enclosing
 * RxDocumentClientImpl's state.
 *
 * NOTE(review): the rxDocumentClientImpl parameter is never referenced - the
 * anonymous class uses RxDocumentClientImpl.this directly; presumably kept for
 * signature stability, confirm before removing.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return new IDocumentQueryClient () {
        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }
        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }
        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }
        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }
        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.consistencyLevel;
        }
        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // an operation listener is attached: stamp the correlated activity id
                // and notify the listener on request, response, and error
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);
                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }
        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }
        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }
        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): returns null - apparently unimplemented; confirm this
            // member is never invoked on this code path.
            return null;
        }
    };
}
/**
 * Queries documents in a collection with a parameterized query spec. The query
 * is logged first (the logger decides how much detail to emit), then handed to
 * the shared query pipeline for document resources.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Starts a change feed query against the given collection.
 *
 * @param collection        resolved collection to read the change feed from; must not be null
 * @param changeFeedOptions options controlling start position, mode, and paging
 * @param classOfT          deserialization target type for feed items
 * @return a Flux of change feed pages
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQueryImpl.executeAsync();
}
/**
 * PagedFlux entry point for change feed: unwraps the change feed options held
 * by the operation state and delegates to queryDocumentChangeFeed.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}
/**
 * Reads all documents that belong to a single logical partition. Resolves the
 * collection, builds a partition-scoped scan query, locates the physical
 * partition (by effective partition key) that owns the partition key, and runs
 * the query against just that range. When the end-to-end latency policy makes
 * more than one region applicable, diagnostics are captured via a scoped
 * factory and merged back into the request options on completion, error, or
 * cancellation.
 *
 * @throws IllegalArgumentException when collectionLink is empty or partitionKey is null
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }
    // clone so per-operation mutations below don't leak into the caller's options
    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());
    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);
    DiagnosticsClientContext effectiveClientContext;
    ScopedDiagnosticsFactory diagnosticsFactory;
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // single applicable region: no speculation, diagnostics flow to this client directly
        effectiveClientContext = this;
        diagnosticsFactory = null;
    } else {
        // cross-region speculation: diagnostics go through a scoped factory so they
        // can be reset and merged per attempt
        diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
        effectiveClientContext = diagnosticsFactory;
    }
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        effectiveClientContext,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );
    // resolve collection metadata (cached) for the link
    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        // "SELECT * FROM root WHERE root[pk] = @value"-style logical partition scan
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
        // retries if the target partition was split/gone so the routing map is refreshed
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }
                    // map the logical partition key to the physical range that owns it
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);
                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                    // pin the query to that single partition key range
                    return createQueryInternal(
                        effectiveClientContext,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);
        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }
        // multi-region case: fold the scoped diagnostics back into the request options
        // on every outcome (next item, error, cancellation)
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}
/**
 * Exposes the client-wide query plan cache (query text -> partitioned query
 * execution info). NOTE(review): returns the internal mutable map directly, so
 * callers share and can mutate this client's state - confirm that is intended.
 */
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}
/**
 * Reads the partition key range feed of a collection via the non-document
 * read-feed path (operation-state variant).
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedLink);
}
/**
 * Reads the partition key range feed of a collection via the non-document
 * read-feed path (query-request-options variant).
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedLink);
}
/**
 * Builds the service request for a stored procedure operation under the given
 * collection. Validates arguments first; the exception order (link, then
 * resource) is part of the observable contract.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);

    String resourcePath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
}
/**
 * Builds the service request for a user defined function operation under the
 * given collection. Validates arguments first; the exception order (link, then
 * resource) is part of the observable contract.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);

    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
/**
 * Creates a stored procedure. A single retry-policy instance is shared between
 * the operation body and the retry driver so retry state is tracked once.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core of createStoredProcedure: builds and sends the Create request, mapping
 * the raw service response into a typed resource response. Synchronous
 * construction failures are funneled into Mono.error so callers always get a
 * reactive error signal.
 */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        // let the retry policy observe/prepare the request before it is sent
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Replaces a stored procedure. A single retry-policy instance is shared between
 * the operation body and the retry driver so retry state is tracked once.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/**
 * Core of replaceStoredProcedure: validates the resource, builds a Replace
 * request addressed by the sproc's self link, and maps the service response to
 * a typed resource response. Synchronous failures become Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        // replace targets the existing resource, hence self link rather than collection link
        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Deletes a stored procedure. A single retry-policy instance is shared between
 * the operation body and the retry driver so retry state is tracked once.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core of deleteStoredProcedure: builds a Delete request for the given sproc
 * link and maps the service response to a typed resource response. Synchronous
 * failures become Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads a stored procedure. A single retry-policy instance is shared between
 * the operation body and the retry driver so retry state is tracked once.
 */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Core of readStoredProcedure: builds a Read request for the given sproc link
 * and maps the service response to a typed resource response. Synchronous
 * failures become Mono.error.
 */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Reads the stored procedure feed of a collection via the non-document
 * read-feed path.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    String feedLink = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedLink);
}
/**
 * Queries stored procedures using raw query text: wraps the text in a
 * parameter-less SqlQuerySpec and reuses the spec-based overload.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryStoredProcedures(collectionLink, querySpec, state);
}
/**
 * Queries stored procedures with a parameterized query spec via the shared
 * query pipeline.
 */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
/**
 * Executes a stored procedure with the given parameters. A single retry-policy
 * instance is shared between the operation body and the retry driver.
 */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
/**
 * Executes a transactional batch request. A single retry-policy instance is
 * shared between the operation body and the retry driver.
 */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Core of executeStoredProcedure: builds the ExecuteJavaScript request
 * (serializing the procedure parameters into the body), enriches it with
 * partition key information, sends it through the Create pipeline, and maps
 * the raw response to a StoredProcedureResponse. Synchronous failures become
 * Mono.error.
 *
 * Fix: the flatMap lambda previously ignored its emitted parameter and reused
 * the captured 'request' in both create(...) and captureSessionToken(...).
 * addPartitionKeyInformation emits the request to use; if it ever returns a
 * different (enriched/decorated) instance, that enrichment was silently lost.
 * The emitted instance 'req' is now used consistently.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        // sproc execution results are returned as raw JSON
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);

        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // use the request instance emitted by addPartitionKeyInformation ('req'),
        // not the captured 'request', so any enrichment it performed is preserved
        return reqObs.flatMap(req -> create(req, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(req, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Core of executeBatchRequest: builds the batch service request, sends it
 * through the Create pipeline, and parses the service response into a
 * CosmosBatchResponse. Synchronous construction failures become Mono.error.
 */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
/**
 * Creates a trigger. A single retry-policy instance is shared between the
 * operation body and the retry driver so retry state is tracked once.
 */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Core of createTrigger: builds and sends the Create request for the trigger
 * and maps the service response to a typed resource response. Synchronous
 * failures become Mono.error.
 */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Assembles the service request for a trigger operation under the given collection.
 * The argument name doubles as the IllegalArgumentException message, per file convention.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);

    // Triggers are addressed under <collection>/triggers.
    final String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Trigger, resourcePath, trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // One retry policy instance is shared by the internal call and the retry wrapper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing trigger via its self-link.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        // Replace targets the trigger's own self-link.
        final String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the trigger addressed by {@code triggerLink}.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);

        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the trigger addressed by {@code triggerLink}.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);

        final String resourcePath = Utils.joinPath(triggerLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    // Guard against a missing collection link before issuing the feed read.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryTriggers(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 QueryFeedOperationState state) {
    // All trigger queries funnel through the generic query pipeline.
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that creates a UDF under the given collection.
 *
 * @param collectionLink      link of the owning collection
 * @param udf                 UDF definition to create; must not be null
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return Mono emitting the created UDF, or an error Mono on any failure
 */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Fail fast with the conventional IllegalArgumentException; previously a null
        // udf produced an NPE from udf.getId() in the log call below, before
        // getUserDefinedFunctionRequest could run its own null-check.
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing UDF via its self-link.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
                                                                                       RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);

        // Replace targets the UDF's own self-link.
        final String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the UDF addressed by {@code udfLink}.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
                                                                                      RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);

        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the UDF addressed by {@code udfLink}.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
                                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);

        final String resourcePath = Utils.joinPath(udfLink, null);
        final Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        QueryFeedOperationState state) {
    // Guard against a missing collection link before issuing the feed read.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    String query,
    QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUserDefinedFunctions(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    // All UDF queries funnel through the generic query pipeline.
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the conflict addressed by {@code conflictLink}, enriching the request with
 * partition-key information before sending.
 *
 * @param conflictLink        link of the conflict resource; must be non-empty
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return Mono emitting the conflict, or an error Mono on any failure
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Use the request emitted by addPartitionKeyInformation (`req`) rather than the
            // outer `request`; the lambda parameter was previously ignored, which would
            // silently drop any enrichment if the helper ever emitted a different instance.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.read(req, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    // Guard against a missing collection link before issuing the feed read.
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryConflicts(collectionLink, spec, state);
}
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   QueryFeedOperationState state) {
    // All conflict queries funnel through the generic query pipeline.
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the conflict addressed by {@code conflictLink}, enriching the request with
 * partition-key information before sending.
 *
 * @param conflictLink        link of the conflict resource; must be non-empty
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return Mono emitting the deletion response, or an error Mono on any failure
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }
        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        return reqObs.flatMap(req -> {
            // Use the request emitted by addPartitionKeyInformation (`req`) rather than the
            // outer `request`; the lambda parameter was previously ignored, which would
            // silently drop any enrichment if the helper ever emitted a different instance.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(req);
            }
            return this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that creates a user under the given database.
 *
 * @param databaseLink              link of the owning database
 * @param user                      user definition to create; must not be null
 * @param options                   request options (may be null)
 * @param documentClientRetryPolicy retry policy forwarded to the create pipeline
 * @return Mono emitting the created user, or an error Mono on any failure
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        // Fail fast with the conventional IllegalArgumentException; previously a null
        // user produced an NPE from user.getId() in the log call below, before
        // getUserRequest could run its own null-check.
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that upserts a user under the given database.
 *
 * @param databaseLink        link of the owning database
 * @param user                user definition to upsert; must not be null
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return Mono emitting the upserted user, or an error Mono on any failure
 */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Fail fast with the conventional IllegalArgumentException; previously a null
        // user produced an NPE from user.getId() in the log call below, before
        // getUserRequest could run its own null-check.
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Assembles the service request for a user operation under the given database.
 * The argument name doubles as the IllegalArgumentException message, per file convention.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);

    // Users are addressed under <database>/users.
    final String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing user via its self-link.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);

        // Replace targets the user's own self-link.
        final String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Added the @Override annotation for consistency with every sibling interface method
// in this file, so the compiler verifies this actually overrides the interface method.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
/**
 * Deletes the user addressed by {@code userLink}.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Deleting a User. userLink [{}]", userLink);

        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the user addressed by {@code userLink}.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }
        logger.debug("Reading a User. userLink [{}]", userLink);

        final String resourcePath = Utils.joinPath(userLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    // Guard against a missing database link before issuing the feed read.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.User, User.class, feedPath);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    // Wrap the raw query text and delegate to the SqlQuerySpec overload.
    final SqlQuerySpec spec = new SqlQuerySpec(query);
    return queryUsers(databaseLink, spec, state);
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           QueryFeedOperationState state) {
    // All user queries funnel through the generic query pipeline.
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the client encryption key addressed by {@code clientEncryptionKeyLink}.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);

        final String resourcePath = Utils.joinPath(clientEncryptionKeyLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                             ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that creates a client encryption key under the given database.
 *
 * @param databaseLink              link of the owning database
 * @param clientEncryptionKey       key definition to create; must not be null
 * @param options                   request options (may be null)
 * @param documentClientRetryPolicy retry policy forwarded to the create pipeline
 * @return Mono emitting the created key, or an error Mono on any failure
 */
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        // Fail fast with the conventional IllegalArgumentException; previously a null
        // key produced an NPE from clientEncryptionKey.getId() in the log call below,
        // before getClientEncryptionKeyRequest could run its own null-check.
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Assembles the service request for a client-encryption-key operation under the given database.
 * The argument name doubles as the IllegalArgumentException message, per file convention.
 */
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                               OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);

    // Client encryption keys are addressed under the database's clientencryptionkeys segment.
    final String resourcePath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                              String nameBasedLink,
                                                                              RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Replaces an existing client encryption key via its name-based link.
 * Synchronous failures are converted into an error Mono.
 */
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
        RxDocumentClientImpl.validateResource(clientEncryptionKey);

        // Replace targets the name-based link supplied by the caller.
        final String resourcePath = Utils.joinPath(nameBasedLink, null);
        final Map<String, String> headers = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
            OperationType.Replace);
        final RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.ClientEncryptionKey, resourcePath, clientEncryptionKey, headers,
            options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
    String databaseLink,
    QueryFeedOperationState state) {
    // Guard against a missing database link before issuing the feed read.
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    final String feedPath = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, feedPath);
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
    String databaseLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state) {
    // All client-encryption-key queries funnel through the generic query pipeline.
    return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
}
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Reuse the SAME retry-policy instance for both the internal call and the retry
    // wrapper. Previously a second, freshly-created policy was passed to
    // inlineIfPossibleAsObs, so onBeforeSendRequest and shouldRetry operated on
    // different policy state; every sibling method shares a single instance.
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), documentClientRetryPolicy);
}
/**
 * Builds and issues the service request that creates a permission under the given user.
 *
 * @param userLink                  link of the owning user
 * @param permission                permission definition to create; must not be null
 * @param options                   request options (may be null)
 * @param documentClientRetryPolicy retry policy forwarded to the create pipeline
 * @return Mono emitting the created permission, or an error Mono on any failure
 */
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        // Fail fast with the conventional IllegalArgumentException; previously a null
        // permission produced an NPE from permission.getId() in the log call below,
        // before getPermissionRequest could run its own null-check.
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
                                                           RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertPermissionInternal(userLink, permission, options, retryPolicy),
        retryPolicy);
}
/**
 * Builds and issues the service request that upserts a permission under the given user.
 *
 * @param userLink            link of the owning user
 * @param permission          permission definition to upsert; must not be null
 * @param options             request options (may be null)
 * @param retryPolicyInstance retry policy notified before the request is sent (may be null)
 * @return Mono emitting the upserted permission, or an error Mono on any failure
 */
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
                                                                    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        // Fail fast with the conventional IllegalArgumentException; previously a null
        // permission produced an NPE from permission.getId() in the log call below,
        // before getPermissionRequest could run its own null-check.
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
        RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
            OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Assembles the service request for a permission operation under the given user.
 * The argument name doubles as the IllegalArgumentException message, per file convention.
 */
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
                                                      RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(userLink)) {
        throw new IllegalArgumentException("userLink");
    }
    if (permission == null) {
        throw new IllegalArgumentException("permission");
    }
    RxDocumentClientImpl.validateResource(permission);

    // Permissions are addressed under <user>/permissions.
    final String resourcePath = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
    final Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.Permission, resourcePath, permission, headers, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
    // Delegate to the internal implementation under the shared retry helper.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replacePermissionInternal(permission, options, retryPolicy),
        retryPolicy);
}
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (permission == null) {
            throw new IllegalArgumentException("permission");
        }
        logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
        RxDocumentClientImpl.validateResource(permission);
        // Replace targets the permission's own self link.
        String selfLinkPath = Utils.joinPath(permission.getSelfLink(), null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
        RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Permission, selfLinkPath, permission, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(replaceRequest);
        }
        return this.replace(replaceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
    // Deletes the permission addressed by its link, routed through a fresh per-operation retry policy.
    DocumentClientRetryPolicy deleteRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deletePermissionInternal(permissionLink, options, deleteRetryPolicy),
        deleteRetryPolicy);
}
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
        String permissionPath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
        RxDocumentServiceRequest deleteRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Permission, permissionPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(deleteRequest);
        }
        return this.delete(deleteRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
    // Reads a single permission by link, routed through a fresh per-operation retry policy.
    DocumentClientRetryPolicy readRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readPermissionInternal(permissionLink, options, readRetryPolicy),
        readRetryPolicy);
}
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
    try {
        if (StringUtils.isEmpty(permissionLink)) {
            throw new IllegalArgumentException("permissionLink");
        }
        logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
        String permissionPath = Utils.joinPath(permissionLink, null);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
        RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Permission, permissionPath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Permission.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
// Reads all permissions under the given user as a paged feed: <userLink>/permissions.
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec and delegates.
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
QueryFeedOperationState state) {
return queryPermissions(userLink, new SqlQuerySpec(query), state);
}
@Override
// Runs a SQL query over the permissions of the given user.
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
}
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
    // Replaces a throughput offer, routed through a fresh per-operation retry policy.
    DocumentClientRetryPolicy offerRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceOfferInternal(offer, offerRetryPolicy),
        offerRetryPolicy);
}
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        if (offer == null) {
            throw new IllegalArgumentException("offer");
        }
        logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
        RxDocumentClientImpl.validateResource(offer);
        String offerPath = Utils.joinPath(offer.getSelfLink(), null);
        // Offer requests carry no custom headers and no request options.
        RxDocumentServiceRequest replaceRequest = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Offer, offerPath, offer, null, null);
        return this.replace(replaceRequest, documentClientRetryPolicy)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
    // Reads a single throughput offer by link, routed through a fresh per-operation retry policy.
    DocumentClientRetryPolicy readRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readOfferInternal(offerLink, readRetryPolicy),
        readRetryPolicy);
}
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(offerLink)) {
            throw new IllegalArgumentException("offerLink");
        }
        logger.debug("Reading an Offer. offerLink [{}]", offerLink);
        String offerPath = Utils.joinPath(offerLink, null);
        // The HashMap cast keeps the no-headers overload of create() unambiguous.
        RxDocumentServiceRequest readRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Offer, offerPath, (HashMap<String, String>)null, null);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(readRequest);
        }
        return this.read(readRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Offer.class));
    } catch (Exception e) {
        logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
// Reads all offers in the account as a paged feed over the /offers path.
public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
}
// Convenience overload: extracts the query options from the operation state and delegates.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
QueryFeedOperationState state,
ResourceType resourceType,
Class<T> klass,
String resourceLink) {
return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
}
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
    CosmosQueryRequestOptions options,
    ResourceType resourceType,
    Class<T> klass,
    String resourceLink) {
    // Wraps the paginated read-feed in a retry pipeline using a fresh per-operation policy.
    DocumentClientRetryPolicy feedRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, feedRetryPolicy),
        feedRetryPolicy);
}
// Paged ReadFeed over a non-document resource (databases, collections, users, offers, ...).
// Emits one FeedResponse per page; paging state travels in the continuation-token header.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
CosmosQueryRequestOptions options,
ResourceType resourceType,
Class<T> klass,
String resourceLink,
DocumentClientRetryPolicy retryPolicy) {
final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
// -1 lets the service choose its default page size.
int maxPageSize = maxItemCount != null ? maxItemCount : -1;
// Documents must go through the document read-feed/query path, not this helper.
assert(resourceType != ResourceType.Document);
// Builds one ReadFeed request per page; the continuation token (when present) resumes the feed.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
Map<String, String> requestHeaders = new HashMap<>();
if (continuationToken != null) {
requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
}
requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
retryPolicy.onBeforeSendRequest(request);
return request;
};
// Executes a page request and deserializes the payload into klass instances.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
request -> readFeed(request)
.map(response -> toFeedResponsePage(
response,
ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.getItemFactoryMethod(nonNullOptions, klass),
klass));
// Paginator drives the createRequest/execute loop until the continuation is exhausted.
return Paginator
.getPaginatedQueryResultAsObservable(
nonNullOptions,
createRequestFunc,
executeFunc,
maxPageSize);
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec and delegates.
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
return queryOffers(new SqlQuerySpec(query), state);
}
@Override
// Runs a SQL query over the account's offers (no parent resource link).
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    // Fetches account metadata, routed through a fresh per-operation retry policy.
    DocumentClientRetryPolicy accountRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getDatabaseAccountInternal(accountRetryPolicy),
        accountRetryPolicy);
}
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        // Account metadata lives at the service root (""); the HashMap cast disambiguates the overload.
        RxDocumentServiceRequest accountRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount, "",
            (HashMap<String, String>) null,
            null);
        return this.read(accountRequest, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Returns the session-token container used for session-consistency bookkeeping.
public Object getSession() {
return this.sessionContainer;
}
// Replaces the session-token container; the argument must be a SessionContainer
// (the unconditional cast throws ClassCastException otherwise).
public void setSession(Object sessionContainer) {
this.sessionContainer = (SessionContainer) sessionContainer;
}
@Override
// Accessor for the collection metadata cache.
public RxClientCollectionCache getCollectionCache() {
return this.collectionCache;
}
@Override
// Accessor for the partition-key-range cache.
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
@Override
// Accessor for the global (multi-region) endpoint manager.
public GlobalEndpointManager getGlobalEndpointManager() {
return this.globalEndpointManager;
}
@Override
// Builds a new AddressSelector over this client's address resolver and configured protocol.
// NOTE(review): returns a fresh instance on every call — confirm callers do not expect a singleton.
public AddressSelector getAddressSelector() {
return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}
// Reads the database account metadata from one specific regional endpoint (bypassing endpoint
// selection) and, as a side effect, refreshes the multiple-write-locations flag.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
return Flux.defer(() -> {
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
return this.populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> {
// Pin the request to the requested endpoint instead of the resolved one.
requestPopulated.setEndpointOverride(endpoint);
return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
String message = String.format("Failed to retrieve database account information. %s",
e.getCause() != null
? e.getCause().toString()
: e.toString());
logger.warn(message);
}).map(rsp -> rsp.getResource(DatabaseAccount.class))
// Multi-write is usable only when both the client policy and the account allow it.
.doOnNext(databaseAccount ->
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
&& BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
});
});
}
/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request the request to route
 * @return the store model (gateway or direct) that should process the request
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit gateway mode always wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // These resource/operation combinations are gateway-only regardless of operation type.
    boolean alwaysGateway =
        resourceType == ResourceType.Offer
            || resourceType == ResourceType.ClientEncryptionKey
            || (resourceType.isScript() && operationType != OperationType.ExecuteJavaScript)
            || resourceType == ResourceType.PartitionKeyRange
            || (resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete);
    if (alwaysGateway) {
        return this.gatewayProxy;
    }
    switch (operationType) {
        case Create:
        case Upsert:
            // Top-level resource creation is a metadata operation handled by the gateway.
            return (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection
                || resourceType == ResourceType.Permission)
                ? this.gatewayProxy : this.storeModel;
        case Delete:
            return (resourceType == ResourceType.Database
                || resourceType == ResourceType.User
                || resourceType == ResourceType.DocumentCollection)
                ? this.gatewayProxy : this.storeModel;
        case Replace:
        case Read:
            // Collection metadata reads/replaces go through the gateway.
            return resourceType == ResourceType.DocumentCollection
                ? this.gatewayProxy : this.storeModel;
        default:
            // Cross-partition queries/read-feeds over collection children (no PK range identity
            // and no partition-key header) need gateway-side fan-out.
            if ((operationType == OperationType.Query
                    || operationType == OperationType.SqlQuery
                    || operationType == OperationType.ReadFeed)
                && Utils.isCollectionChild(request.getResourceType())
                && request.getPartitionKeyRangeIdentity() == null
                && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
            return this.storeModel;
    }
}
@Override
// Idempotently shuts down the client: the closed flag guarantees the teardown sequence runs
// exactly once; subsequent calls only log a warning.
public void close() {
    logger.info("Attempting to close client {}", this.clientId);
    if (!closed.getAndSet(true)) {
        activeClientsCnt.decrementAndGet();
        logger.info("Shutting down ...");
        logger.info("Closing Global Endpoint Manager ...");
        LifeCycleUtils.closeQuietly(this.globalEndpointManager);
        logger.info("Closing StoreClientFactory ...");
        LifeCycleUtils.closeQuietly(this.storeClientFactory);
        logger.info("Shutting down reactorHttpClient ...");
        LifeCycleUtils.closeQuietly(this.reactorHttpClient);
        logger.info("Shutting down CpuMonitor ...");
        CpuMemoryMonitor.unregister(this);
        // Null check: enableThroughputControlGroup() flips throughputControlEnabled before it
        // assigns throughputControlStore, and close() is not synchronized against it — without
        // the check a concurrent close() could NPE in that window.
        if (this.throughputControlEnabled.get() && this.throughputControlStore != null) {
            logger.info("Closing ThroughputControlStore ...");
            this.throughputControlStore.close();
        }
        logger.info("Shutting down completed.");
    } else {
        logger.warn("Already shutdown!");
    }
}
@Override
// Accessor for the item deserializer used to materialize documents.
public ItemDeserializer getItemDeserializer() {
return this.itemDeserializer;
}
@Override
// Lazily initializes the throughput-control store on first use (guarded by the synchronized
// method plus the compareAndSet), then registers the group with it.
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
checkNotNull(group, "Throughput control group can not be null");
// NOTE(review): the enabled flag is set before throughputControlStore is assigned; an
// unsynchronized reader (e.g. close()) can observe enabled==true with a null store — verify.
if (this.throughputControlEnabled.compareAndSet(false, true)) {
this.throughputControlStore =
new ThroughputControlStore(
this.collectionCache,
this.connectionPolicy.getConnectionMode(),
this.partitionKeyRangeCache);
// Hook throughput control into whichever transport path this client uses.
if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
this.storeModel.enableThroughputControl(throughputControlStore);
} else {
this.gatewayProxy.enableThroughputControl(throughputControlStore);
}
}
this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}
@Override
// Delegates proactive connection warm-up and cache initialization to the store model.
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}
@Override
// Returns the account's default consistency level as read by the gateway configuration reader.
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
/***
 * Configure fault injector provider.
 * Direct mode wires the injector into the direct store model and address resolver as well;
 * the gateway proxy is configured in every connection mode.
 *
 * @param injectorProvider the fault injector provider; must not be null.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
}
this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}
@Override
// Forwards the "warm-up completed" signal for the given containers to the store model.
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}
@Override
// Forwards the "warm-up started" signal for the given containers to the store model.
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}
@Override
// Returns the credential this client was constructed with (master key or resource token).
public String getMasterKeyOrResourceToken() {
return this.masterKeyOrResourceToken;
}
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    // Builds a parameterized scan of one logical partition:
    //   SELECT * FROM c WHERE c<partitionKeySelector> = @pkValue
    String pkParamName = "@pkValue";
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    List<SqlParameter> parameters = new ArrayList<>();
    parameters.add(new SqlParameter(pkParamName, pkValue));
    String queryText = "SELECT * FROM c WHERE c" + partitionKeySelector + " = " + pkParamName;
    return new SqlQuerySpec(queryText, parameters);
}
@Override
// Resolves the feed ranges (one per physical partition) of the given collection; wrapped in an
// InvalidPartitionException retry policy so stale collection caches trigger a refresh + retry.
public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
collectionLink,
new HashMap<>());
// Query/Document is used only to resolve the collection; no query is actually executed.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
this,
OperationType.Query,
ResourceType.Document,
collectionLink,
null);
invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
return ObservableHelper.inlineIfPossibleAsObs(
() -> getFeedRangesInternal(request, collectionLink),
invalidPartitionExceptionRetryPolicy);
}
// Resolves the collection, then maps its full set of overlapping partition key ranges
// (the entire key space) to feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
request);
return collectionObs.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
// forceRefresh=true: always fetch the current range topology for this collection.
Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
.tryGetOverlappingRangesAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
});
}
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
    // A null range list means the cached collection mapping is stale: force a name-cache
    // refresh and let InvalidPartitionException drive the retry policy.
    final List<PartitionKeyRange> ranges = partitionKeyRangeListValueHolder.v;
    if (ranges == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>(ranges.size());
    for (PartitionKeyRange pkRange : ranges) {
        feedRanges.add(toFeedRange(pkRange));
    }
    return feedRanges;
}
// Wraps a partition key range's EPK range as a feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
return new FeedRangeEpkImpl(pkRange.toRange());
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    // Two raw longs from the calling thread's RNG; version/variant bits are stamped by the overload.
    ThreadLocalRandom rnd = ThreadLocalRandom.current();
    return randomUuid(rnd.nextLong(), rnd.nextLong());
}
// Stamps RFC 4122 version-4 / IETF-variant bits onto two raw longs and builds the UUID.
static UUID randomUuid(long msb, long lsb) {
    // Version nibble (bits 12-15 of the most significant long) forced to 0b0100 (version 4).
    long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Variant bits (top two bits of the least significant long) forced to 0b10 (IETF variant).
    long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: uses this client as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
// Runs a document point operation with the threshold-based availability (hedging) strategy:
// the operation starts in the preferred region and, after growing delays, is speculatively
// retried in the other applicable regions; the first non-transient result wins.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fewer than two applicable regions -> hedging is pointless; run the plain operation.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
// Scoped factory collects diagnostics from all speculative attempts for later merging.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// First mono: the normal cross-region operation; any CosmosException (transient or
// not) is captured as a value so firstWithValue can compare outcomes.
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged mono: exclude every other applicable region so the attempt is pinned to
// exactly this region; only non-transient errors are captured as values.
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Delay grows per region: threshold + (regionIndex - 1) * thresholdStep.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First emitted value wins; captured exceptions are re-thrown after diagnostics are merged.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// firstWithValue signals NoSuchElementException when no mono produced a value; dig out
// the first real CosmosException from the per-region causes.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
private static boolean isCosmosException(Throwable t) {
    // True when the reactor-unwrapped throwable is a CosmosException.
    return Exceptions.unwrap(t) instanceof CosmosException;
}
private static boolean isNonTransientCosmosException(Throwable t) {
    // Only CosmosExceptions whose status/sub-status classify as non-transient qualify.
    Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        CosmosException cosmosException = Utils.as(unwrapped, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    // Effective exclusions = caller-provided exclusions + every applicable region other than
    // the current one, pinning the hedged request to exactly one region.
    List<String> effectiveExcludedRegions = initialExcludedRegions != null
        ? new ArrayList<>(initialExcludedRegions)
        : new ArrayList<>();
    for (String region : applicableRegions) {
        if (!region.equals(currentRegion)) {
            effectiveExcludedRegions.add(region);
        }
    }
    return effectiveExcludedRegions;
}
// Decides whether a status/sub-status pair is a final (non-transient) outcome for hedging,
// i.e. retrying the operation in another region would not change the result.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Anything below 400 is a success and therefore final.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // Client-side operation timeout is treated as final rather than hedged further.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
        && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
        return true;
    }
    // Deterministic client errors will not change across regions.
    switch (statusCode) {
        case HttpConstants.StatusCodes.BADREQUEST:
        case HttpConstants.StatusCodes.CONFLICT:
        case HttpConstants.StatusCodes.METHOD_NOT_ALLOWED:
        case HttpConstants.StatusCodes.PRECONDITION_FAILED:
        case HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE:
        case HttpConstants.StatusCodes.UNAUTHORIZED:
            return true;
        default:
            break;
    }
    // A plain 404 (sub-status UNKNOWN) is a definite "not found"; everything else is transient.
    return statusCode == HttpConstants.StatusCodes.NOTFOUND
        && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;
}
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    // Fall back to this client when no override is supplied.
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any
 * @param operationType - the operation type used to pick read vs. write endpoints
 * @return the applicable endpoints ordered by preference list if any
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
    if (operationType.isReadOnlyOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
    }
    if (operationType.isWriteOperation()) {
        return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
    }
    // Neither read nor write: no endpoints are applicable.
    return EMPTY_ENDPOINT_LIST;
}
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // Strip null entries in place and return the same (mutated) list instance.
    orderedEffectiveEndpointsList.removeIf(endpoint -> endpoint == null);
    return orderedEffectiveEndpointsList;
}
// Convenience overload: extracts the excluded regions from the request options and delegates.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
// Computes the ordered region list eligible for speculative (hedged) execution. Returns the
// empty list whenever hedging does not apply: policy disabled, non-document resource, writes
// without idempotent retries or without multi-write support, or a non-threshold strategy.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Case-insensitive exclusion matching via lower-cased region names.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
List<String> orderedRegionsForSpeculation = new ArrayList<>();
// NOTE(review): assumes getRegionName never returns null for an applicable endpoint — verify.
endpoints.forEach(uri -> {
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
/**
 * Executes a feed/query operation applying the threshold-based availability strategy
 * ("hedging") across the applicable regions.
 *
 * The operation starts immediately in the first applicable region; for every further
 * applicable region a clone of the request is subscribed after a staggered delay
 * (threshold + (regionIndex - 1) * thresholdStep). The first inner mono that emits a
 * value wins. Non-transient errors are wrapped as values so they can win the race and
 * are rethrown to the caller afterwards; transient errors in a hedged region merely
 * remove that region from the race.
 */
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = req
.requestContext
.getEndToEndOperationLatencyPolicyConfig();
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
// 'false' for isIdempotentWriteRetriesEnabled - that flag is only consulted for write
// operations in getApplicableRegionsForSpeculation; feed operations are reads.
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Fewer than two applicable regions -> hedging adds nothing; run the operation directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// Initial attempt: keeps the full cross-regional retry flow; ANY CosmosException is
// converted into a value so it can terminate the race.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: exclude all other applicable regions so this clone is pinned to
// 'region'; only NON-transient errors become race-terminating values here.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger subscription: threshold for the first hedge, plus one thresholdStep for
// each additional hedged region.
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First mono producing a value wins; wrapped non-transient failures are unwrapped and
// rethrown here.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
Throwable exception = Exceptions.unwrap(throwable);
// Mono.firstWithValue signals NoSuchElementException when no source emitted a value;
// unwrap the composite cause and surface the first CosmosException, if any.
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
/**
 * Functional contract for a document point operation (create/read/replace/delete/patch)
 * that can be re-invoked per attempt with attempt-specific request options, end-to-end
 * latency policy, and a diagnostics client context override.
 */
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Outcome wrapper for a point-operation attempt that terminated non-transiently:
 * holds either a successful response or a non-transient CosmosException, never both.
 * Wrapping failures as values lets hedged monos "complete with a value" even on error.
 */
private static class NonTransientPointOperationResult {
    // Exactly one of the two fields below is non-null.
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    /** Failure outcome - wraps a non-transient error. */
    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Success outcome - wraps the service response. */
    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** True when this result represents a failure. */
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
/**
 * Outcome wrapper for a feed-operation attempt that terminated non-transiently:
 * holds either a successful response of type {@code T} or a non-transient
 * CosmosException, never both. Used by the hedging race in
 * executeFeedOperationWithAvailabilityStrategy so failures can win the race as values.
 */
private static class NonTransientFeedOperationResult<T> {
    // Exactly one of the two fields below is non-null.
    private final T response;
    private final CosmosException exception;

    /** Failure outcome - wraps a non-transient error. */
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    /** Success outcome - wraps the feed response. */
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** True when this result represents a failure. */
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
/**
 * A {@link DiagnosticsClientContext} decorator that records every CosmosDiagnostics
 * instance it creates so that, once the operation finishes, all of them can be merged
 * into a single CosmosDiagnosticsContext. Merging happens at most once (see isMerged);
 * reset() re-arms the factory for another cycle.
 */
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {

    // Guards against merging the captured diagnostics more than once.
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    private final DiagnosticsClientContext inner;
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    private final boolean shouldCaptureAllFeedDiagnostics;

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() {
        return inner.getConfig();
    }

    @Override
    public CosmosDiagnostics createDiagnostics() {
        // Track every diagnostics instance handed out so it can be merged later.
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() {
        return inner.getUserAgent();
    }

    /**
     * Merges the captured diagnostics into the diagnostics context snapshotted on the
     * given request options (when present).
     */
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            // Fix: the original fetched the snapshot and, when non-null, invoked the
            // getter a second time to assign it - one call suffices.
            knownCtx = requestOptions.getDiagnosticsContextSnapshot();
        }
        merge(knownCtx);
    }

    /**
     * Merges all captured non-empty, context-less diagnostics into {@code knownCtx};
     * when no context is supplied, the first captured diagnostics instance that already
     * carries a context is used instead. Idempotent - only the first call has an effect.
     */
    public void merge(CosmosDiagnosticsContext knownCtx) {
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            // No context ever materialized - nothing to merge into.
            return;
        }
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics &&
                    diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as captured so the paged flux will not emit them again.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    /** Clears captured diagnostics and re-arms the factory for another merge cycle. */
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
} | class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener,
DiagnosticsClientContext {
// --- shared constants and implementation-bridge accessors (static, process-wide) ---
private final static List<String> EMPTY_REGION_LIST = Collections.emptyList();
private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor();
private final static
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor =
ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor();
private final static
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor =
ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor();
private final static
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor =
ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor();
// --- process-wide client bookkeeping used for diagnostics ---
private static final String tempMachineId = "uuid:" + UUID.randomUUID();
private static final AtomicInteger activeClientsCnt = new AtomicInteger(0);
// Number of clients per service endpoint (keyed by endpoint string).
private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>();
private static final AtomicInteger clientIdGenerator = new AtomicInteger(0);
private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>(
PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey,
PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false);
private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " +
"ParallelDocumentQueryExecutioncontext, but not used";
private final static ObjectMapper mapper = Utils.getSimpleObjectMapper();
private final ItemDeserializer itemDeserializer = new ItemDeserializer.JsonDeserializer();
private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class);
// --- per-client configuration and collaborators (mostly assigned in the base constructor) ---
private final String masterKeyOrResourceToken;
private final URI serviceEndpoint;
private final ConnectionPolicy connectionPolicy;
private final ConsistencyLevel consistencyLevel;
private final BaseAuthorizationTokenProvider authorizationTokenProvider;
private final UserAgentContainer userAgentContainer;
private final boolean hasAuthKeyResourceToken;
private final Configs configs;
private final boolean connectionSharingAcrossClientsEnabled;
// Authorization state - exactly one auth mechanism is active per client.
private AzureKeyCredential credential;
private final TokenCredential tokenCredential;
private String[] tokenCredentialScopes;
private SimpleTokenCache tokenCredentialCache;
private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
AuthorizationTokenType authorizationTokenType;
private SessionContainer sessionContainer;
private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY;
// Caches and store models wired during initialization.
private RxClientCollectionCache collectionCache;
private RxGatewayStoreModel gatewayProxy;
private RxStoreModel storeModel;
private GlobalAddressResolver addressResolver;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
// resourceIdOrFullName -> (partition key, resource token) pairs; only populated for
// permission-feed (resource token) authorization.
private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap;
private final boolean contentResponseOnWriteEnabled;
private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final int clientId;
private ClientTelemetry clientTelemetry;
private final ApiType apiType;
private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig;
private IRetryPolicyFactory resetSessionTokenRetryPolicy;
/**
 * Compatibility mode: Allows to specify compatibility mode used by client when
 * making query requests. Should be removed when application/sql is no longer
 * supported.
 */
private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default;
private final GlobalEndpointManager globalEndpointManager;
private final RetryPolicy retryPolicy;
private HttpClient reactorHttpClient;
private Function<HttpClient, HttpClient> httpClientInterceptor;
private volatile boolean useMultipleWriteLocations;
private StoreClientFactory storeClientFactory;
private GatewayServiceConfigurationReader gatewayConfigurationReader;
private final DiagnosticsClientConfig diagnosticsClientConfig;
private final AtomicBoolean throughputControlEnabled;
private ThroughputControlStore throughputControlStore;
private final CosmosClientTelemetryConfig clientTelemetryConfig;
private final String clientCorrelationId;
private final SessionRetryOptions sessionRetryOptions;
private final boolean sessionCapturingOverrideEnabled;
/**
 * Public constructor variant without an explicit TokenCredential.
 * Delegates to the permission-feed aware private constructor (passing {@code null}
 * for the TokenCredential) and then records the optional authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
null,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Set after delegation because the delegated constructors do not accept the resolver.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Public constructor variant with an explicit TokenCredential (AAD authorization).
 * Delegates to the permission-feed aware private constructor and then records the
 * optional authorization token resolver.
 */
public RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
List<Permission> permissionFeed,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverride,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
this(
serviceEndpoint,
masterKeyOrResourceToken,
permissionFeed,
connectionPolicy,
consistencyLevel,
configs,
credential,
tokenCredential,
sessionCapturingOverride,
connectionSharingAcrossClientsEnabled,
contentResponseOnWriteEnabled,
metadataCachesSnapshot,
apiType,
clientTelemetryConfig,
clientCorrelationId,
cosmosEndToEndOperationLatencyPolicyConfig,
sessionRetryOptions,
containerProactiveInitConfig);
// Set after delegation because the delegated constructors do not accept the resolver.
this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver;
}
/**
 * Delegating constructor used for resource-token (permission-feed based) authorization.
 * Wires everything through the base constructor and then builds the map of
 * resourceId/fullName -> (partition key, resource token) pairs from the permission feed.
 *
 * @throws IllegalArgumentException when a permission has an empty or unparsable
 *         resource link, or when the permission feed yields no usable entries.
 */
private RxDocumentClientImpl(URI serviceEndpoint,
                             String masterKeyOrResourceToken,
                             List<Permission> permissionFeed,
                             ConnectionPolicy connectionPolicy,
                             ConsistencyLevel consistencyLevel,
                             Configs configs,
                             AzureKeyCredential credential,
                             TokenCredential tokenCredential,
                             boolean sessionCapturingOverrideEnabled,
                             boolean connectionSharingAcrossClientsEnabled,
                             boolean contentResponseOnWriteEnabled,
                             CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                             ApiType apiType,
                             CosmosClientTelemetryConfig clientTelemetryConfig,
                             String clientCorrelationId,
                             CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                             SessionRetryOptions sessionRetryOptions,
                             CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);

    if (permissionFeed != null && !permissionFeed.isEmpty()) {
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));
            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }
            // computeIfAbsent replaces the original get / null-check / put sequence.
            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs =
                this.resourceTokensMap.computeIfAbsent(pathInfo.resourceIdOrFullName, k -> new ArrayList<>());
            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }
        if (this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }
        // Remember the first token when it is a genuine resource token.
        String firstToken = permissionFeed.get(0).getToken();
        if (ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}
/**
 * Base constructor performing the actual client wiring: client bookkeeping, diagnostics
 * config, authorization resolution, connection policy defaulting, session container,
 * gateway HTTP client, global endpoint manager and retry policy. All other constructors
 * delegate here.
 */
RxDocumentClientImpl(URI serviceEndpoint,
String masterKeyOrResourceToken,
ConnectionPolicy connectionPolicy,
ConsistencyLevel consistencyLevel,
Configs configs,
AzureKeyCredential credential,
TokenCredential tokenCredential,
boolean sessionCapturingOverrideEnabled,
boolean connectionSharingAcrossClientsEnabled,
boolean contentResponseOnWriteEnabled,
CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
ApiType apiType,
CosmosClientTelemetryConfig clientTelemetryConfig,
String clientCorrelationId,
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
SessionRetryOptions sessionRetryOptions,
CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
assert(clientTelemetryConfig != null);
Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
.CosmosClientTelemetryConfigHelper
.getCosmosClientTelemetryConfigAccessor()
.isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
assert(clientTelemetryEnabled != null);
// Process-wide bookkeeping: active-client count, per-client id and correlation id
// (correlation id defaults to the zero-padded client id when blank).
activeClientsCnt.incrementAndGet();
this.clientId = clientIdGenerator.incrementAndGet();
this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
String.format("%05d",this.clientId): clientCorrelationId;
clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
this.diagnosticsClientConfig = new DiagnosticsClientConfig();
this.diagnosticsClientConfig.withClientId(this.clientId);
this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
this.diagnosticsClientConfig.withClientMap(clientMap);
this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
this.diagnosticsClientConfig.withConsistency(consistencyLevel);
this.throughputControlEnabled = new AtomicBoolean(false);
this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
this.sessionRetryOptions = sessionRetryOptions;
logger.info(
"Initializing DocumentClient [{}] with"
+ " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());
try {
this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
this.configs = configs;
this.masterKeyOrResourceToken = masterKeyOrResourceToken;
this.serviceEndpoint = serviceEndpoint;
this.credential = credential;
this.tokenCredential = tokenCredential;
this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
this.authorizationTokenType = AuthorizationTokenType.Invalid;
// Authorization precedence: explicit AzureKeyCredential > resource token >
// master key (wrapped into an AzureKeyCredential) > AAD TokenCredential.
if (this.credential != null) {
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.authorizationTokenProvider = null;
hasAuthKeyResourceToken = true;
this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
} else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
hasAuthKeyResourceToken = false;
this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
} else {
hasAuthKeyResourceToken = false;
this.authorizationTokenProvider = null;
if (tokenCredential != null) {
// NOTE(review): the scope literal below appears truncated in this extract
// (line ends mid-string) - verify the AAD scope construction against upstream.
this.tokenCredentialScopes = new String[] {
serviceEndpoint.getScheme() + ":
};
this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
.getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
this.authorizationTokenType = AuthorizationTokenType.AadToken;
}
}
if (connectionPolicy != null) {
this.connectionPolicy = connectionPolicy;
} else {
// Default to direct-mode connection settings when no policy is supplied.
this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
}
this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
this.diagnosticsClientConfig.withMachineId(tempMachineId);
this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);
this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
// Session capturing is only enabled for SESSION consistency unless explicitly overridden.
boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
this.consistencyLevel = consistencyLevel;
this.userAgentContainer = new UserAgentContainer();
String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
userAgentContainer.setSuffix(userAgentSuffix);
}
this.httpClientInterceptor = null;
this.reactorHttpClient = httpClient();
this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
this.resetSessionTokenRetryPolicy = retryPolicy;
CpuMemoryMonitor.register(this);
this.queryPlanCache = new ConcurrentHashMap<>();
this.apiType = apiType;
this.clientTelemetryConfig = clientTelemetryConfig;
} catch (RuntimeException e) {
// Roll back partially-initialized state so resources do not leak on construction failure.
logger.error("unexpected failure in initializing client.", e);
close();
throw e;
}
}
/** Exposes the diagnostics client configuration assembled during construction. */
@Override
public DiagnosticsClientConfig getConfig() {
    return this.diagnosticsClientConfig;
}
/**
 * Creates a new CosmosDiagnostics instance, honoring the sampling rate configured in
 * this client's telemetry config.
 */
@Override
public CosmosDiagnostics createDiagnostics() {
return diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
}
/**
 * Creates the gateway service configuration reader and derives whether multiple write
 * locations may be used (requires both the client-side opt-in on the connection policy
 * AND the account-level capability).
 */
private void initializeGatewayConfigurationReader() {
this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager);
DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount();
// A null account means the initial account metadata refresh never succeeded - fail fast.
// NOTE(review): the message literals below appear truncated in this extract (they end
// at "https:") - verify against the upstream source.
if (databaseAccount == null) {
logger.error("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
throw new RuntimeException("Client initialization failed."
+ " Check if the endpoint is reachable and if your auth token is valid. More info: https:
}
this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount);
}
/** Pushes the freshly-initialized caches and configuration into the gateway store model. */
private void updateGatewayProxy() {
    RxGatewayStoreModel proxy = this.gatewayProxy;
    proxy.setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    proxy.setCollectionCache(this.collectionCache);
    proxy.setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    proxy.setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}
/** Serializes this client's collection cache into the given metadata-caches snapshot. */
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}
/**
 * Wires up direct (TCP) connectivity: creates the global address resolver and the
 * store client factory, then builds the server store model.
 */
private void initializeDirectConnectivity() {
this.addressResolver = new GlobalAddressResolver(this,
this.reactorHttpClient,
this.globalEndpointManager,
this.configs.getProtocol(),
this,
this.collectionCache,
this.partitionKeyRangeCache,
userAgentContainer,
null,
this.connectionPolicy,
this.apiType);
this.storeClientFactory = new StoreClientFactory(
this.addressResolver,
this.diagnosticsClientConfig,
this.configs,
this.connectionPolicy,
this.userAgentContainer,
this.connectionSharingAcrossClientsEnabled,
this.clientTelemetry,
this.globalEndpointManager);
this.createStoreModel(true);
}
/**
 * Adapts this client to the DatabaseAccountManagerInternal interface consumed by the
 * GlobalEndpointManager, delegating endpoint, account and connection-policy lookups
 * back to this instance.
 */
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
return new DatabaseAccountManagerInternal() {
@Override
public URI getServiceEndpoint() {
return RxDocumentClientImpl.this.getServiceEndpoint();
}
@Override
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
logger.info("Getting database account endpoint from {}", endpoint);
return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
}
@Override
public ConnectionPolicy getConnectionPolicy() {
return RxDocumentClientImpl.this.getConnectionPolicy();
}
};
}
/**
 * Builds the RxGatewayStoreModel used for gateway-mode request execution.
 * Package-private, so the construction can be intercepted - NOTE(review): presumably
 * for test injection; confirm against callers.
 */
RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
ConsistencyLevel consistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
return new RxGatewayStoreModel(
this,
sessionContainer,
consistencyLevel,
queryCompatibilityMode,
userAgentContainer,
globalEndpointManager,
httpClient,
apiType);
}
/**
 * Builds the gateway HTTP client from the connection-policy settings. When connection
 * sharing across clients is enabled, a shared instance is reused; otherwise a dedicated
 * fixed client is created and its configuration recorded in the diagnostics config.
 */
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (this.connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, this.diagnosticsClientConfig);
    }

    this.diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
    return HttpClient.createFixed(httpClientConfig);
}
/**
 * Creates the direct-mode store client and wraps it in a ServerStoreModel.
 * NOTE(review): the 'subscribeRntbdStatus' parameter is not used in this body -
 * confirm whether it is still needed or is a leftover.
 */
private void createStoreModel(boolean subscribeRntbdStatus) {
StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
this.addressResolver,
this.sessionContainer,
this.gatewayConfigurationReader,
this,
this.useMultipleWriteLocations,
this.sessionRetryOptions);
this.storeModel = new ServerStoreModel(storeClient);
}
/** Returns the service endpoint this client was constructed against. */
@Override
public URI getServiceEndpoint() {
    return serviceEndpoint;
}
/** Returns the effective connection policy (supplied or defaulted at construction). */
@Override
public ConnectionPolicy getConnectionPolicy() {
    return connectionPolicy;
}
/** Whether write responses carry the resource payload back to the caller. */
@Override
public boolean isContentResponseOnWriteEnabled() {
    return this.contentResponseOnWriteEnabled;
}
/** Returns the consistency level this client was configured with. */
@Override
public ConsistencyLevel getConsistencyLevel() {
    return this.consistencyLevel;
}
/** Returns the client telemetry collector associated with this client. */
@Override
public ClientTelemetry getClientTelemetry() {
    return clientTelemetry;
}
/** Returns the correlation id (user supplied, or derived from the client id). */
@Override
public String getClientCorrelationId() {
    return clientCorrelationId;
}
/** Returns the machine id recorded in the diagnostics config, or null when unavailable. */
@Override
public String getMachineId() {
    DiagnosticsClientConfig config = this.diagnosticsClientConfig;
    return config == null ? null : config.getMachineId();
}
/** Returns the full user-agent string (including any configured suffix). */
@Override
public String getUserAgent() {
    return userAgentContainer.getUserAgent();
}
/**
 * Creates a database resource, wrapping the operation with the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createDatabaseInternal(database, options, requestPolicy),
        requestPolicy);
}
/**
 * Implementation of database creation: validates the resource, serializes it while
 * recording serialization diagnostics, builds the service request and maps the raw
 * response. Preparation failures are surfaced as an error mono.
 */
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (database == null) {
throw new IllegalArgumentException("Database");
}
logger.debug("Creating a Database. id: [{}]", database.getId());
validateResource(database);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
// Time the serialization separately so it shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);
// Let the retry policy observe/adjust the request before it is sent.
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
.map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Deletes a database resource, wrapping the operation with the session-token reset
 * retry policy.
 */
@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy requestPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDatabaseInternal(databaseLink, options, requestPolicy),
        requestPolicy);
}
private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
String path = Utils.joinPath(databaseLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Database, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
} catch (Exception e) {
logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}
    private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        // Reads a database by link: builds the GET request and maps the raw response to a typed
        // one; synchronous failures become error Monos.
        try {
            if (StringUtils.isEmpty(databaseLink)) {
                throw new IllegalArgumentException("databaseLink");
            }
            logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
            String path = Utils.joinPath(databaseLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Database, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                // Allow the retry policy to decorate the request before the first attempt.
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    @Override
    public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
        // Reads the database feed at the account root via the non-document read-feed path.
        return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
    }
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
switch (resourceTypeEnum) {
case Database:
return Paths.DATABASES_ROOT;
case DocumentCollection:
return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
case Document:
return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
case Offer:
return Paths.OFFERS_ROOT;
case User:
return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
case ClientEncryptionKey:
return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
case Permission:
return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
case Attachment:
return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
case StoredProcedure:
return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
case Trigger:
return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
case UserDefinedFunction:
return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
case Conflict:
return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
default:
throw new IllegalArgumentException("resource type not supported");
}
}
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
if (options == null) {
return null;
}
return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}
private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
if (options == null) {
return null;
}
return options.getOperationContextAndListenerTuple();
}
    private <T> Flux<FeedResponse<T>> createQuery(
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        QueryFeedOperationState state,
        Class<T> klass,
        ResourceType resourceTypeEnum) {
        // Convenience overload: uses this client itself as the diagnostics client context.
        return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
    }
    private <T> Flux<FeedResponse<T>> createQuery(
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        QueryFeedOperationState state,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DiagnosticsClientContext innerDiagnosticsFactory) {
        // Builds the query pipeline: resolves the feed link, correlation activity id and query
        // client, wraps execution in an InvalidPartitionException retry policy, and ensures the
        // scoped diagnostics are merged into the operation state on next page, error and cancel.
        String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);
        CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();
        UUID correlationActivityIdOfRequestOptions = qryOptAccessor
            .getCorrelationActivityId(nonNullQueryOptions);
        // Prefer a caller-provided correlation id; otherwise generate one for this query.
        UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
            correlationActivityIdOfRequestOptions : randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));
        // Retries after refreshing the collection cache when the target partition has gone away.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));
        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
        state.registerDiagnosticsFactory(
            diagnosticsFactory::reset,
            diagnosticsFactory::merge);
        return
            ObservableHelper.fluxInlineIfPossibleAsObs(
                () -> createQueryInternal(
                    diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
                invalidPartitionExceptionRetryPolicy
            ).flatMap(result -> {
                // Merge the diagnostics captured so far into the ctx snapshot for each page.
                diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                // Merge diagnostics before propagating the failure so nothing is lost.
                diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
    }
    private <T> Flux<FeedResponse<T>> createQueryInternal(
        DiagnosticsClientContext diagnosticsClientContext,
        String resourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        IDocumentQueryClient queryClient,
        UUID activityId,
        final AtomicBoolean isQueryCancelledOnTimeout) {
        // Creates the document-query execution context (with optional query-plan caching) and
        // adapts its page stream: attaches query info / plan diagnostics to responses and, when
        // an end-to-end latency policy is enabled, wraps the stream with a timeout.
        Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory
                .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                    options, resourceLink, false, activityId,
                    Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);
        AtomicBoolean isFirstResponse = new AtomicBoolean(true);
        return executionContext.flatMap(iDocumentQueryExecutionContext -> {
            // Query info is only available for pipelined execution contexts.
            QueryInfo queryInfo = null;
            if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
                queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
            }
            QueryInfo finalQueryInfo = queryInfo;
            Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
                .map(tFeedResponse -> {
                    if (finalQueryInfo != null) {
                        if (finalQueryInfo.hasSelectValue()) {
                            ModelBridgeInternal
                                .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                        }
                        // Query-plan diagnostics are attached only to the first page.
                        if (isFirstResponse.compareAndSet(true, false)) {
                            ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                                finalQueryInfo.getQueryPlanDiagnosticsContext());
                        }
                    }
                    return tFeedResponse;
                });
            RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .toRequestOptions(options);
            CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
                getEndToEndOperationLatencyPolicyConfig(requestOptions);
            if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
                return getFeedResponseFluxWithTimeout(feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout);
            }
            return feedResponseFlux;
        }, Queues.SMALL_BUFFER_SIZE, 1); // prefetch of 1 keeps page ordering deterministic
    }
private static void applyExceptionToMergedDiagnostics(
CosmosQueryRequestOptions requestOptions,
CosmosException exception) {
List<CosmosDiagnostics> cancelledRequestDiagnostics =
qryOptAccessor
.getCancelledRequestDiagnosticsTracker(requestOptions);
if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
CosmosDiagnostics aggregratedCosmosDiagnostics =
cancelledRequestDiagnostics
.stream()
.reduce((first, toBeMerged) -> {
ClientSideRequestStatistics clientSideRequestStatistics =
ImplementationBridgeHelpers
.CosmosDiagnosticsHelper
.getCosmosDiagnosticsAccessor()
.getClientSideRequestStatisticsRaw(first);
ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
ImplementationBridgeHelpers
.CosmosDiagnosticsHelper
.getCosmosDiagnosticsAccessor()
.getClientSideRequestStatisticsRaw(first);
if (clientSideRequestStatistics == null) {
return toBeMerged;
} else {
clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
return first;
}
})
.get();
BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
}
}
    private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
        Flux<FeedResponse<T>> feedResponseFlux,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        CosmosQueryRequestOptions requestOptions,
        final AtomicBoolean isQueryCancelledOnTimeout) {
        // Applies the configured end-to-end timeout to the page flux; a reactor TimeoutException
        // is mapped to a Cosmos cancellation exception enriched with the merged diagnostics of
        // the cancelled requests, and the cancellation flag is raised so the execution context
        // can stop further work.
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // NOTE(review): a negative duration makes .timeout(...) fire immediately; this branch
            // then surfaces a dedicated negative-timeout exception - confirm that is intended.
            return feedResponseFlux
                .timeout(endToEndTimeout)
                .onErrorMap(throwable -> {
                    if (throwable instanceof TimeoutException) {
                        CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                        // Keep the original stack trace for troubleshooting.
                        cancellationException.setStackTrace(throwable.getStackTrace());
                        isQueryCancelledOnTimeout.set(true);
                        applyExceptionToMergedDiagnostics(requestOptions, cancellationException);
                        return cancellationException;
                    }
                    return throwable;
                });
        }
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException exception = new OperationCancelledException();
                    // Keep the original stack trace for troubleshooting.
                    exception.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnostics(requestOptions, exception);
                    return exception;
                }
                return throwable;
            });
    }
@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
return queryDatabases(new SqlQuerySpec(query), state);
}
    @Override
    public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
        // Queries databases at the account root.
        return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
    }
    @Override
    public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                       DocumentCollection collection, RequestOptions options) {
        // Resolves a per-operation retry policy and executes the create inline when possible.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
    }
    private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                                DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        // Creates a collection under the given database: validates inputs, serializes the
        // collection (capturing timings for diagnostics), issues the Create request and records
        // the returned session token for session consistency.
        try {
            if (StringUtils.isEmpty(databaseLink)) {
                throw new IllegalArgumentException("databaseLink");
            }
            if (collection == null) {
                throw new IllegalArgumentException("collection");
            }
            logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
                collection.getId());
            validateResource(collection);
            String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);
            // Time the JSON serialization so it can be attached to the request diagnostics below.
            Instant serializationStartTimeUTC = Instant.now();
            ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
            Instant serializationEndTimeUTC = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }
            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
                .doOnNext(resourceResponse -> {
                    // Capture the session token of the newly created collection.
                    // NOTE(review): unlike replaceCollectionInternal, getResource() is not
                    // null-checked here - confirm a create response always carries the resource.
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                });
        } catch (Exception e) {
            logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}
    private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                                 RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        // Replaces a collection in place (addressed via its self link): validates and serializes
        // the resource (capturing timings for diagnostics), issues the Replace request and
        // records the returned session token for session consistency.
        try {
            if (collection == null) {
                throw new IllegalArgumentException("collection");
            }
            logger.debug("Replacing a Collection. id: [{}]", collection.getId());
            validateResource(collection);
            String path = Utils.joinPath(collection.getSelfLink(), null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
            // Time the JSON serialization so it can be attached to the request diagnostics below.
            Instant serializationStartTimeUTC = Instant.now();
            ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
            Instant serializationEndTimeUTC = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
                .doOnNext(resourceResponse -> {
                    // Capture the session token only when the response carries the resource.
                    if (resourceResponse.getResource() != null) {
                        this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                            getAltLink(resourceResponse.getResource()),
                            resourceResponse.getResponseHeaders());
                    }
                });
        } catch (Exception e) {
            logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }
    @Override
    public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
                                                                       RequestOptions options) {
        // Resolves a per-operation retry policy and executes the delete inline when possible.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
    }
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
        // Populates DELETE headers asynchronously, updates retry timing if this is a retry,
        // then dispatches the request to the appropriate store proxy.
        return populateHeadersAsync(request, RequestVerb.DELETE)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    // Mark the end of the retry backoff window now that the request is re-sent.
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
documentClientRetryPolicy.getRetryContext().updateEndTime();
}
return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
});
}
    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        // Populates GET headers asynchronously, updates retry timing when re-sending, then
        // dispatches the request to the appropriate store proxy.
        return populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    // Mark the end of the retry backoff window now that the request is re-sent.
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return populateHeadersAsync(request, RequestVerb.GET)
.flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        // Dispatches a query (POST) and captures the returned session token so subsequent
        // session-consistent reads observe this query's progress.
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated ->
                this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                    .map(response -> {
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }
                ));
    }
    @Override
    public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                     RequestOptions options) {
        // Resolves a per-operation retry policy and executes the read inline when possible.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
    }
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
String path = Utils.joinPath(collectionLink, null);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);
if (retryPolicyInstance != null){
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class));
} catch (Exception e) {
logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
    @Override
    public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) {
        // Reads the collection feed under the given database via the non-document read-feed path.
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
            Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
    }
@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
QueryFeedOperationState state) {
return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection);
}
    @Override
    public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink,
                                                                   SqlQuerySpec querySpec, QueryFeedOperationState state) {
        // Queries collections under the given database link.
        return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
    }
private static String serializeProcedureParams(List<Object> objectArray) {
String[] stringArray = new String[objectArray.size()];
for (int i = 0; i < objectArray.size(); ++i) {
Object object = objectArray.get(i);
if (object instanceof JsonSerializable) {
stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object);
} else {
try {
stringArray[i] = mapper.writeValueAsString(object);
} catch (IOException e) {
throw new IllegalArgumentException("Can't serialize the object into the json string", e);
}
}
}
return String.format("[%s]", StringUtils.join(stringArray, ","));
}
private static void validateResource(Resource resource) {
if (!StringUtils.isEmpty(resource.getId())) {
if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
resource.getId().indexOf('?') != -1 || resource.getId().indexOf('
throw new IllegalArgumentException("Id contains illegal chars.");
}
if (resource.getId().endsWith(" ")) {
throw new IllegalArgumentException("Id ends with a space.");
}
}
}
    private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
        // Translates client-level defaults plus per-request options into the HTTP request
        // headers for a single operation.
        Map<String, String> headers = new HashMap<>();
        if (this.useMultipleWriteLocations) {
            headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
        }
        if (consistencyLevel != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
        }
        if (options == null) {
            // No per-request options: only the "return minimal content" preference may still apply.
            if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
                headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
            }
            return headers;
        }
        // Caller-supplied raw headers are applied first so the typed options below win on conflict.
        Map<String, String> customOptions = options.getHeaders();
        if (customOptions != null) {
            headers.putAll(customOptions);
        }
        // Per-request contentResponseOnWriteEnabled overrides the client-level default.
        boolean contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
        if (options.isContentResponseOnWriteEnabled() != null) {
            contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
        }
        if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        if (options.getIfMatchETag() != null) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
        }
        if (options.getIfNoneMatchETag() != null) {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
        }
        if (options.getConsistencyLevel() != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
        }
        if (options.getIndexingDirective() != null) {
            headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
        }
        if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
            String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
        }
        if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
            String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
        }
        if (!Strings.isNullOrEmpty(options.getSessionToken())) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
        }
        if (options.getResourceTokenExpirySeconds() != null) {
            headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY,
                String.valueOf(options.getResourceTokenExpirySeconds()));
        }
        // Explicit offer throughput takes precedence over a named offer type.
        if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
            headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
        } else if (options.getOfferType() != null) {
            headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
        }
        if (options.getOfferThroughput() == null) {
            if (options.getThroughputProperties() != null) {
                Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
                final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
                OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
                if (offerAutoscaleSettings != null) {
                    autoscaleAutoUpgradeProperties
                        = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
                }
                // Manual (fixed) throughput and autoscale settings are mutually exclusive.
                if (offer.hasOfferThroughput() &&
                    (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                        autoscaleAutoUpgradeProperties != null &&
                            autoscaleAutoUpgradeProperties
                                .getAutoscaleThroughputProperties()
                                .getIncrementPercent() >= 0)) {
                    throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                        + "fixed offer");
                }
                if (offer.hasOfferThroughput()) {
                    headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
                } else if (offer.getOfferAutoScaleSettings() != null) {
                    headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                        ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
                }
            }
        }
        if (options.isQuotaInfoEnabled()) {
            headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
        }
        if (options.isScriptLoggingEnabled()) {
            headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
        }
        if (options.getDedicatedGatewayRequestOptions() != null) {
            if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
                headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                    String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
            }
            if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
                headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                    String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
            }
        }
        return headers;
    }
    public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
        // Exposes the retry-policy factory used to build per-operation retry policies.
        return this.resetSessionTokenRetryPolicy;
    }
    private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                      ByteBuffer contentAsByteBuffer,
                                                                      Document document,
                                                                      RequestOptions options) {
        // Resolves the target collection from the cache, then delegates to the synchronous
        // overload to stamp the partition-key header/state onto the request.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return collectionObs
            .map(collectionValueHolder -> {
                addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
                return request;
            });
    }
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
ByteBuffer contentAsByteBuffer,
Object document,
RequestOptions options,
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
return collectionObs.map(collectionValueHolder -> {
addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
return request;
});
}
    private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                            ByteBuffer contentAsByteBuffer,
                                            Object objectDoc, RequestOptions options,
                                            DocumentCollection collection) {
        // Determines the effective partition key for the request and stamps it both as the
        // x-ms-documentdb-partitionkey header and as internal request state. Sources, in
        // priority order: explicit PartitionKey.NONE, explicit key in the options, the empty
        // key for collections without a partition key definition, or extraction from the
        // document payload itself.
        PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
        PartitionKeyInternal partitionKeyInternal = null;
        if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else if (options != null && options.getPartitionKey() != null) {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
        } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
            // Collections with no partition key definition use the empty key.
            partitionKeyInternal = PartitionKeyInternal.getEmpty();
        } else if (contentAsByteBuffer != null || objectDoc != null) {
            // Extract the key from the document; prefer an already-materialized node over
            // re-parsing the serialized payload.
            InternalObjectNode internalObjectNode;
            if (objectDoc instanceof InternalObjectNode) {
                internalObjectNode = (InternalObjectNode) objectDoc;
            } else if (objectDoc instanceof ObjectNode) {
                internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
            } else if (contentAsByteBuffer != null) {
                // Rewind before reading - the buffer was advanced during serialization.
                contentAsByteBuffer.rewind();
                internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
            } else {
                throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
            }
            // Time the partition-key extraction and record it in the request diagnostics.
            Instant serializationStartTime = Instant.now();
            partitionKeyInternal =  PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTime,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
            );
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }
        } else {
            throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    }
/**
 * Builds the {@link RxDocumentServiceRequest} for a document point write (Create or Upsert).
 * Serializes the payload (timed for diagnostics), stamps request options (non-idempotent
 * write retries, excluded regions), notifies the retry policy, and resolves the collection
 * so partition-key information can be added before the request is returned.
 *
 * @param requestRetryPolicy          retry policy notified before the request is sent; may be null
 * @param documentCollectionLink      link of the target collection; must be non-empty
 * @param document                    the payload to serialize; must not be null
 * @param options                     request options; may carry a tracking id and excluded regions
 * @param disableAutomaticIdGeneration NOTE(review): not referenced in this method — TODO confirm
 *                                    id generation is handled by the caller or the serializer
 * @param operationType               Create or Upsert
 * @param clientContextOverride       diagnostics client context override for availability strategy
 * @return a Mono that emits the request once partition-key information has been populated
 * @throws IllegalArgumentException when documentCollectionLink is empty or document is null
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
String documentCollectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
OperationType operationType,
DiagnosticsClientContext clientContextOverride) {
if (StringUtils.isEmpty(documentCollectionLink)) {
throw new IllegalArgumentException("documentCollectionLink");
}
if (document == null) {
throw new IllegalArgumentException("document");
}
// Serialize the payload up front and time it so the cost shows up in request diagnostics.
Instant serializationStartTimeUTC = Instant.now();
String trackingId = null;
if (options != null) {
trackingId = options.getTrackingId();
}
ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
Instant serializationEndTimeUTC = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTimeUTC,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
operationType, ResourceType.Document, path, requestHeaders, options, content);
// Opt this request into retries of non-idempotent writes only when explicitly enabled.
if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if( options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
// Let the retry policy capture per-request state (e.g. target endpoint) before dispatch.
if (requestRetryPolicy != null) {
requestRetryPolicy.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
// Collection metadata is needed to extract/validate the partition key from the payload.
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
return addPartitionKeyInformation(request, content, document, options, collectionObs);
}
/**
 * Builds the {@link RxDocumentServiceRequest} for a transactional batch operation.
 * Wraps the pre-serialized batch body as request content, records serialization timing in
 * the request diagnostics, notifies the retry policy, and resolves the collection so the
 * batch routing headers (partition key / partition key range) can be stamped on the request.
 *
 * @param requestRetryPolicy           retry policy notified before the request is sent; may be null
 * @param documentCollectionLink       link of the target collection; must be non-empty
 * @param serverBatchRequest           the pre-serialized batch; must not be null
 * @param options                      request options; may carry excluded regions
 * @param disableAutomaticIdGeneration NOTE(review): not referenced here — ids are expected to
 *                                     be assigned before the batch body was serialized
 * @return a Mono emitting the request once batch headers have been added
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                               String documentCollectionLink,
                                                               ServerBatchRequest serverBatchRequest,
                                                               RequestOptions options,
                                                               boolean disableAutomaticIdGeneration) {
    checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    // Time the body wrapping for diagnostics parity with the other write paths.
    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
        serializationStartTimeUTC,
        serializationEndTimeUTC,
        SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Batch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    // Fix: exclude-regions propagation was previously duplicated (set both before and after
    // the serialization diagnostics); it is now applied exactly once.
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // Collection metadata is required to stamp the batch routing headers.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}
/**
 * Stamps batch-specific routing and behavior headers onto the request.
 * Single-partition-key batches get a PARTITION_KEY header (resolving PartitionKey.NONE via the
 * collection's PK definition); partition-key-range batches get a PK range identity instead.
 *
 * @param request            the request to mutate
 * @param serverBatchRequest either a SinglePartitionKeyServerBatchRequest or a PartitionKeyRangeServerBatchRequest
 * @param collection         resolved collection, used to translate PartitionKey.NONE
 * @return the same request instance, mutated
 * @throws UnsupportedOperationException for unknown ServerBatchRequest subtypes
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
ServerBatchRequest serverBatchRequest,
DocumentCollection collection) {
if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
PartitionKeyInternal partitionKeyInternal;
if (partitionKey.equals(PartitionKey.NONE)) {
// NONE is a sentinel; the effective value depends on the collection's PK definition.
PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
} else {
partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
}
request.setPartitionKeyInternal(partitionKeyInternal);
request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
} else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
} else {
throw new UnsupportedOperationException("Unknown Server request.");
}
// Common batch headers: mark as batch, atomicity, and continue-on-error semantics.
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
return request;
}
/**
 * Populates the standard request headers (date, authorization, content-type, accept, API type,
 * SDK capabilities) and, when the request targets a feed range, the feed-range filtering headers.
 * <p>
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to
 * populate headers.
 *
 * @param request    request to populate headers on
 * @param httpMethod HTTP method, used for both auth-token signing and content-type selection
 * @return Mono which, on subscription, populates the headers in the request passed in the argument
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
// Key/resource-token/resolver/credential based auth is computed synchronously here;
// AAD token auth is attached later in populateAuthorizationHeader.
if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
|| this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
String resourceName = request.getResourceAddress();
String authorization = this.getUserAuthorizationToken(
resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
AuthorizationTokenType.PrimaryMasterKey, request.properties);
try {
authorization = URLEncoder.encode(authorization, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("Failed to encode authtoken.", e);
}
request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
}
if (this.apiType != null) {
request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
}
this.populateCapabilitiesHeader(request);
// Content type defaults: JSON for POST/PUT bodies, JSON-patch for PATCH; never overwrite
// a content type the caller already set.
if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
&& !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
}
if (RequestVerb.PATCH.equals(httpMethod) &&
!request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}
if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}
MetadataDiagnosticsContext metadataDiagnosticsCtx =
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
// Feed-range filtering requires resolving the collection first, so that path is async.
if (this.requiresFeedRangeFiltering(request)) {
return request.getFeedRange()
.populateFeedRangeFilteringHeaders(
this.getPartitionKeyRangeCache(),
request,
this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
.flatMap(this::populateAuthorizationHeader);
}
return this.populateAuthorizationHeader(request);
}
/**
 * Advertises the SDK's supported capabilities to the service, unless the header was
 * already set by the caller.
 */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    if (headers.containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        return;
    }
    headers.put(
        HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
        HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
}
/**
 * Decides whether feed-range filtering headers must be populated for this request:
 * only document/conflict feed-style operations (ReadFeed, Query, SqlQuery) that actually
 * carry a feed range qualify.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    ResourceType resourceType = request.getResourceType();
    boolean feedRangeCapableResource =
        resourceType == ResourceType.Document || resourceType == ResourceType.Conflict;
    if (!feedRangeCapableResource) {
        return false;
    }
    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            return request.getFeedRange() != null;
        default:
            return false;
    }
}
/**
 * Attaches the AUTHORIZATION header to the request when this client uses AAD token
 * authentication; for all other auth types the header was already populated synchronously
 * and the request is passed through unchanged.
 *
 * @param request the request to authorize; must not be null
 * @return Mono emitting the (possibly mutated) request
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(request);
    }
    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return request;
        });
}
/**
 * HttpHeaders variant of {@code populateAuthorizationHeader}: sets the AUTHORIZATION header
 * for AAD token auth, otherwise returns the headers unchanged.
 *
 * @param httpHeaders headers to authorize; must not be null
 * @return Mono emitting the (possibly mutated) headers
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }
    if (this.authorizationTokenType != AuthorizationTokenType.AadToken) {
        return Mono.just(httpHeaders);
    }
    return AadTokenAuthorizationHelper
        .getAuthorizationToken(this.tokenCredentialCache)
        .map(token -> {
            httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, token);
            return httpHeaders;
        });
}
/**
 * Returns the authorization token type this client was configured with
 * (e.g. primary master key, resource token, or AAD token).
 */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
return this.authorizationTokenType;
}
/**
 * Computes the authorization token for a request. The precedence order is significant:
 * 1) a user-supplied token resolver, 2) a key credential (signed token), 3) a single
 * resource token supplied in place of a master key, 4) the per-resource token map.
 *
 * @param resourceName address of the resource being accessed
 * @param resourceType type of the resource being accessed
 * @param requestVerb  HTTP verb, part of the signature payload
 * @param headers      request headers (the date header participates in signing)
 * @param tokenType    requested token type (currently not consulted by the branches below)
 * @param properties   opaque caller properties, forwarded read-only to the token resolver
 * @return the authorization token to place in the AUTHORIZATION header (not yet URL-encoded)
 */
@Override
public String getUserAuthorizationToken(String resourceName,
ResourceType resourceType,
RequestVerb requestVerb,
Map<String, String> headers,
AuthorizationTokenType tokenType,
Map<String, Object> properties) {
if (this.cosmosAuthorizationTokenResolver != null) {
// Properties are exposed as an unmodifiable view so the resolver cannot mutate request state.
return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
properties != null ? Collections.unmodifiableMap(properties) : null);
} else if (credential != null) {
return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
resourceType, headers);
} else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
// A single resource token was supplied where a master key would normally go; use it verbatim.
return masterKeyOrResourceToken;
} else {
assert resourceTokensMap != null;
if(resourceType.equals(ResourceType.DatabaseAccount)) {
// Account metadata reads can use any token; the first one from the permission feed is used.
return this.firstResourceTokenFromPermissionFeed;
}
return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
}
}
/**
 * Maps a service-side {@link ResourceType} to the public {@code CosmosResourceType},
 * defaulting to SYSTEM when the service value has no public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType mapped =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    return mapped != null ? mapped : CosmosResourceType.SYSTEM;
}
/**
 * Stores the session token from the service response into the client's session container,
 * keyed by the originating request.
 */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}
/**
 * Dispatches a create (POST) request through the store model selected for it.
 * Headers are populated lazily on subscription.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            RxStoreModel storeModel = this.getStoreProxy(populatedRequest);
            // Close the retry back-off window when this attempt is a retry.
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeModel.processMessage(populatedRequest, operationContextAndListenerTuple);
        });
}
/**
 * Dispatches an upsert request: a POST with the IS_UPSERT header set. On success the
 * response's session token is captured for session consistency.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(populatedRequest -> {
            Map<String, String> headers = populatedRequest.getHeaders();
            // populateHeadersAsync always installs a header map.
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest)
                .processMessage(populatedRequest, operationContextAndListenerTuple)
                .map(response -> {
                    this.captureSessionToken(populatedRequest, response);
                    return response;
                });
        });
}
/**
 * Dispatches a replace (PUT) request through the store model selected for it.
 */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Dispatches a partial-document-update (PATCH) request through the store model selected for it.
 */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(populatedRequest -> {
            boolean isRetriedAttempt = documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0;
            if (isRetriedAttempt) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(populatedRequest).processMessage(populatedRequest);
        });
}
/**
 * Public entry point for creating a document. The operation is wrapped with the availability
 * strategy so it can be hedged/retried per the client's end-to-end policy.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (effectiveOptions, endToEndConfig, effectiveClientContext) -> createDocumentCore(
            collectionLink,
            document,
            effectiveOptions,
            disableAutomaticIdGeneration,
            endToEndConfig,
            effectiveClientContext),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core of the create-document point operation: sets up the retry stack and delegates to
 * createDocumentInternal via inlineIfPossibleAsObs (which may retry per the policy).
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
// Without an explicit partition key, wrap with PartitionKeyMismatchRetryPolicy
// (presumably retries after refreshing collection metadata on PK mismatch — confirm).
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(() ->
createDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
/**
 * Builds the create request, dispatches it with the end-to-end timeout applied, and maps the
 * wire response to a typed ResourceResponse. Synchronous failures are converted to Mono.error.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy requestRetryPolicy,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable =
requestObs.flatMap(request -> {
// The E2E timeout wraps the whole dispatch so cancellation maps to OperationCancelledException.
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Applies the end-to-end operation timeout to a response Mono when the policy is enabled.
 * A negative timeout fails fast; otherwise a reactor timeout is attached and timeout errors
 * are mapped to OperationCancelledException via getCancellationException.
 *
 * @return the response Mono, possibly wrapped with timeout/cancellation handling
 */
private static <T> Mono<T> getRxDocumentServiceResponseMonoWithE2ETimeout(
RxDocumentServiceRequest request,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
Mono<T> rxDocumentServiceResponseMono) {
if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
if (endToEndTimeout.isNegative()) {
return Mono.error(getNegativeTimeoutException(request, endToEndTimeout));
}
// Record the effective policy on the request so downstream layers can observe it.
request.requestContext.setEndToEndOperationLatencyPolicyConfig(endToEndPolicyConfig);
return rxDocumentServiceResponseMono
.timeout(endToEndTimeout)
.onErrorMap(throwable -> getCancellationException(request, throwable));
}
return rxDocumentServiceResponseMono;
}
/**
 * Maps a reactor TimeoutException into an OperationCancelledException carrying the request's
 * diagnostics; any other throwable is returned unchanged.
 * NOTE(review): when requestContext is null the original TimeoutException is returned even
 * though an OperationCancelledException was constructed — confirm this fall-through is intended.
 */
private static Throwable getCancellationException(RxDocumentServiceRequest request, Throwable throwable) {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (unwrappedException instanceof TimeoutException) {
CosmosException exception = new OperationCancelledException();
// Preserve where the timeout surfaced rather than where this exception was created.
exception.setStackTrace(throwable.getStackTrace());
if (request.requestContext != null) {
request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
}
return throwable;
}
/**
 * Builds the OperationCancelledException used when the configured end-to-end timeout is
 * negative, tagging it with the NEGATIVE_TIMEOUT_PROVIDED sub-status and, when available,
 * the request's diagnostics.
 *
 * @param request         may be null; diagnostics are attached only when request context exists
 * @param negativeTimeout must be non-null and negative
 */
private static CosmosException getNegativeTimeoutException(RxDocumentServiceRequest request, Duration negativeTimeout) {
checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
checkArgument(
negativeTimeout.isNegative(),
"This exception should only be used for negative timeouts");
String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
CosmosException exception = new OperationCancelledException(message, null);
BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
if (request != null && request.requestContext != null) {
request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true));
return BridgeInternal.setCosmosDiagnostics(exception, request.requestContext.cosmosDiagnostics);
}
return exception;
}
/**
 * Public entry point for upserting a document, wrapped with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (effectiveOptions, endToEndConfig, effectiveClientContext) -> upsertDocumentCore(
            collectionLink, document, effectiveOptions, disableAutomaticIdGeneration, endToEndConfig, effectiveClientContext),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core of the upsert-document point operation: mirrors createDocumentCore — builds the retry
 * stack (adding PK-mismatch handling when no explicit partition key was given) and delegates
 * to upsertDocumentInternal.
 */
private Mono<ResourceResponse<Document>> upsertDocumentCore(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> upsertDocumentInternal(
collectionLink,
document,
options,
disableAutomaticIdGeneration,
finalRetryPolicyInstance,
endToEndPolicyConfig,
clientContextOverride),
finalRetryPolicyInstance);
}
/**
 * Builds the upsert request (shared with the create path via getCreateDocumentRequest with
 * OperationType.Upsert), dispatches it under the end-to-end timeout and maps the response.
 * Synchronous failures are converted to Mono.error.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
String collectionLink,
Object document,
RequestOptions options,
boolean disableAutomaticIdGeneration,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
try {
logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);
Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(retryPolicyInstance, collectionLink, document,
options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride);
Mono<RxDocumentServiceResponse> responseObservable = reqObs.flatMap(request -> {
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)));
});
return responseObservable
.map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
} catch (Exception e) {
logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/**
 * Public entry point for replacing a document by link, wrapped with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, endToEndConfig, effectiveClientContext) -> replaceDocumentCore(
            documentLink,
            document,
            effectiveOptions,
            endToEndConfig,
            effectiveClientContext),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core of replace-by-link: builds the retry stack (deriving the collection link from the
 * document link for PK-mismatch retries when no explicit partition key was supplied) and
 * delegates to replaceDocumentInternal.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
String documentLink,
Object document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
String collectionLink = Utils.getCollectionName(documentLink);
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
documentLink,
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
/**
 * Validates input and converts the raw object into a typed {@link Document} before delegating
 * to the typed replace path. Validation failures surface via {@link Mono#error}.
 *
 * @throws IllegalArgumentException (wrapped in Mono.error) when documentLink is empty or document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Document typedDocument = documentFromObject(document, mapper);
        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Fix: pass the throwable as the trailing argument so SLF4J logs the stack trace,
        // consistent with the create/upsert paths in this class.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Public entry point for replacing a document instance (link taken from the document itself),
 * wrapped with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (effectiveOptions, endToEndConfig, effectiveClientContext) -> replaceDocumentCore(
            document,
            effectiveOptions,
            endToEndConfig,
            effectiveClientContext),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core of replace-by-document-instance: builds the retry stack and delegates to
 * replaceDocumentInternal.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
Document document,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
if (options == null || options.getPartitionKey() == null) {
// NOTE(review): the sibling overload derives the collection link via
// Utils.getCollectionName(documentLink); here the raw self link is used as the
// collection link for PartitionKeyMismatchRetryPolicy — confirm this is intended.
String collectionLink = document.getSelfLink();
requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
collectionCache, requestRetryPolicy, collectionLink, options);
}
DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
return ObservableHelper.inlineIfPossibleAsObs(
() -> replaceDocumentInternal(
document,
options,
finalRequestRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
requestRetryPolicy);
}
/**
 * Replaces a document using its own self link. Validation failures surface via
 * {@link Mono#error}.
 *
 * @throws IllegalArgumentException (wrapped in Mono.error) when document is null
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            endToEndPolicyConfig,
            clientContextOverride);
    } catch (Exception e) {
        // Fix: the message previously said "replacing a database" in this document-replace
        // path; also pass the throwable so the stack trace is logged.
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Lowest-level replace: serializes the document (stamping the tracking id when supplied),
 * builds the PUT request with serialization diagnostics, resolves the collection to attach
 * partition-key information, and dispatches under the end-to-end timeout.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
String documentLink,
Document document,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
if (document == null) {
throw new IllegalArgumentException("document");
}
logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
Instant serializationStartTimeUTC = Instant.now();
// Stamp the tracking id into the payload before serialization so it round-trips with the item.
if (options != null) {
String trackingId = options.getTrackingId();
if (trackingId != null && !trackingId.isEmpty()) {
document.set(Constants.Properties.TRACKING_ID, trackingId);
}
}
ByteBuffer content = serializeJsonToByteBuffer(document);
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
getEffectiveClientContext(clientContextOverride),
OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
request);
Mono<RxDocumentServiceRequest> requestObs =
addPartitionKeyInformation(request, content, document, options, collectionObs);
// NOTE(review): the lambda ignores `req` and uses the captured `request` — this relies on
// addPartitionKeyInformation mutating and emitting the same instance; confirm.
return requestObs.flatMap(req -> {
Mono<ResourceResponse<Document>> resourceResponseMono = replace(request, retryPolicyInstance)
.map(resp -> toResourceResponse(resp, Document.class));
return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
});
}
/**
 * Resolves the effective end-to-end latency policy for a request: the request-level config
 * when present, otherwise the client-wide default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    CosmosEndToEndOperationLatencyPolicyConfig requestLevelConfig =
        options == null ? null : options.getCosmosEndToEndLatencyPolicyConfig();
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(requestLevelConfig);
}
/**
 * Returns the given policy config when non-null, otherwise the client-wide default.
 */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    if (policyConfig != null) {
        return policyConfig;
    }
    return this.cosmosEndToEndOperationLatencyPolicyConfig;
}
/**
 * Public entry point for partial document update (patch), wrapped with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {
    boolean nonIdempotentWriteRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (effectiveOptions, endToEndConfig, effectiveClientContext) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            effectiveOptions,
            endToEndConfig,
            effectiveClientContext),
        options,
        nonIdempotentWriteRetriesEnabled);
}
/**
 * Core of the patch point operation: sets up the session-token-reset retry policy and
 * delegates to patchDocumentInternal. Note: unlike create/upsert/replace, no
 * PartitionKeyMismatchRetryPolicy is layered here (patch never infers the PK from a payload).
 */
private Mono<ResourceResponse<Document>> patchDocumentCore(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
return ObservableHelper.inlineIfPossibleAsObs(
() -> patchDocumentInternal(
documentLink,
cosmosPatchOperations,
options,
documentClientRetryPolicy,
endToEndPolicyConfig,
clientContextOverride),
documentClientRetryPolicy);
}
/**
 * Builds and dispatches the PATCH request: serializes the patch operations (timed for
 * diagnostics), stamps request options, resolves the collection to attach partition-key
 * information (from options only — there is no payload to infer it from), and dispatches
 * under the end-to-end timeout.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
String documentLink,
CosmosPatchOperations cosmosPatchOperations,
RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance,
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
DiagnosticsClientContext clientContextOverride) {
checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");
logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);
final String path = Utils.joinPath(documentLink, null);
final Map<String, String> requestHeaders =
getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
Instant serializationStartTimeUTC = Instant.now();
ByteBuffer content = ByteBuffer.wrap(
PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));
Instant serializationEndTime = Instant.now();
SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
new SerializationDiagnosticsContext.SerializationDiagnostics(
serializationStartTimeUTC,
serializationEndTime,
SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
// NOTE(review): other write paths wrap this with getEffectiveClientContext(...) — confirm
// the raw clientContextOverride is intended here.
final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
clientContextOverride,
OperationType.Patch,
ResourceType.Document,
path,
requestHeaders,
options,
content);
if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
request.setNonIdempotentWriteRetriesEnabled(true);
}
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
SerializationDiagnosticsContext serializationDiagnosticsContext =
BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
if (serializationDiagnosticsContext != null) {
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
}
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
// Null content/objectDoc: the partition key must come from options (or the collection default).
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
request,
null,
null,
options,
collectionObs);
Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(req -> {
Mono<RxDocumentServiceResponse> rxDocumentServiceResponseMono = patch(request, retryPolicyInstance);
return getRxDocumentServiceResponseMonoWithE2ETimeout(
request, endToEndPolicyConfig, rxDocumentServiceResponseMono);
});
return responseObservable.map(resp -> toResourceResponse(resp, Document.class));
}
/**
 * Deletes the document identified by the given link, routing the point
 * operation through the availability-strategy wrapper (cross-region hedging).
 *
 * @param documentLink link of the document to delete.
 * @param options request options; may be null.
 * @return a {@link Mono} emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, ctxOverride) ->
            deleteDocumentCore(documentLink, null, effectiveOptions, e2eConfig, ctxOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Deletes a document; the supplied {@link InternalObjectNode} is forwarded to
 * partition-key resolution on the internal delete path.
 *
 * @param documentLink link of the document to delete.
 * @param internalObjectNode document payload used when resolving partition key information.
 * @param options request options; may be null.
 * @return a {@link Mono} emitting the delete response.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    final boolean nonIdempotentRetriesEnabled =
        options != null && options.getNonIdempotentWriteRetriesEnabled();
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (effectiveOptions, e2eConfig, ctxOverride) ->
            deleteDocumentCore(documentLink, internalObjectNode, effectiveOptions, e2eConfig, ctxOverride),
        options,
        nonIdempotentRetriesEnabled
    );
}
/**
 * Core delete path: builds a session-token-resetting retry policy for the
 * effective client context and runs the internal delete under it.
 */
private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteDocumentInternal(
            documentLink,
            internalObjectNode,
            options,
            retryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        retryPolicy);
}
/**
 * Issues the Delete-document request: resolves the collection, attaches
 * partition key information, and wires in retry, diagnostics and the
 * end-to-end timeout policy.
 *
 * @throws IllegalArgumentException (as a Mono error) when documentLink is empty.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);
        if (options != null) {
            if (options.getNonIdempotentWriteRetriesEnabled()) {
                request.setNonIdempotentWriteRetriesEnabled(true);
            }
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        final Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                request);
        return addPartitionKeyInformation(request, null, internalObjectNode, options, collectionObs)
            .flatMap(req -> {
                final Mono<RxDocumentServiceResponse> responseMono =
                    this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options));
                return getRxDocumentServiceResponseMonoWithE2ETimeout(
                    request, endToEndPolicyConfig, responseMono);
            })
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Deletes all items belonging to one logical partition.
 * NOTE(review): the {@code partitionKey} parameter is not used directly here —
 * the internal path appears to derive partition information from the request
 * options/headers; confirm before relying on it.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Internal delete-by-partition-key path: builds a PartitionKey-typed Delete
 * request, resolves the collection, attaches partition key information and
 * executes the bulk-delete.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        final String path = Utils.joinPath(collectionLink, null);
        final Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        final Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return addPartitionKeyInformation(request, null, null, options, collectionObs)
            .flatMap(req -> this
                .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads a single document by link, using this client itself as the
 * diagnostics factory.
 */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return this.readDocument(documentLink, options, this);
}
/**
 * Read path entry point with an explicit diagnostics factory, routed through
 * the availability-strategy wrapper. Reads never enable non-idempotent write
 * retries, hence the fixed {@code false}.
 */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (effectiveOptions, e2eConfig, ctxOverride) ->
            readDocumentCore(documentLink, effectiveOptions, e2eConfig, ctxOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}
/**
 * Core read path: creates the retry policy for the effective client context
 * and runs the internal read under it.
 */
private Mono<ResourceResponse<Document>> readDocumentCore(String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) {
    final DocumentClientRetryPolicy retryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readDocumentInternal(documentLink, options, retryPolicy, endToEndPolicyConfig, clientContextOverride),
        retryPolicy);
}
/**
 * Issues the Read-document request: resolves the collection, attaches
 * partition key information and wires in retry, diagnostics and the
 * end-to-end timeout policy.
 *
 * Fix: {@code options} may legitimately be null on this path (the public
 * overloads pass it straight through), but the exclude-regions assignment
 * dereferenced it unconditionally, unlike the sibling delete/create paths
 * which null-check options first. Guard it to avoid a NullPointerException.
 *
 * @throws IllegalArgumentException (as a Mono error) when documentLink is empty.
 */
private Mono<ResourceResponse<Document>> readDocumentInternal(String documentLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance,
                                                              CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
                                                              DiagnosticsClientContext clientContextOverride) {
    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }
        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);
        // BUGFIX: guard against null RequestOptions, consistent with the
        // delete path above which performs the same null check.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
        return requestObs.flatMap(req -> {
            Mono<ResourceResponse<Document>> resourceResponseMono = this.read(request, retryPolicyInstance).map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
            return getRxDocumentServiceResponseMonoWithE2ETimeout(request, endToEndPolicyConfig, resourceResponseMono);
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}
/**
 * Reads the full document feed of a collection. A full read-feed is served
 * as an unfiltered query over the collection.
 */
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return this.queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}
// Reads a batch of items identified by (id, partitionKey) pairs in one logical
// operation: identities that are alone on their physical partition are served
// via point reads, the rest via generated per-partition queries; all pages are
// then folded into a single aggregated FeedResponse with merged diagnostics.
@Override
public <T> Mono<FeedResponse<T>> readMany(
List<CosmosItemIdentity> itemIdentityList,
String collectionLink,
QueryFeedOperationState state,
Class<T> klass) {
// Scoped factory collects diagnostics from every sub-request so they can be
// merged back into the caller-visible diagnostics context.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
state.registerDiagnosticsFactory(
() -> {},
(ctx) -> diagnosticsFactory.merge(ctx)
);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
// Placeholder query request used only to resolve the collection metadata.
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
OperationType.Query,
ResourceType.Document,
collectionLink, null
);
Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request);
return collectionObs
.flatMap(documentCollectionResourceResponse -> {
final DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null);
return valueHolderMono
.flatMap(collectionRoutingMapValueHolder -> {
// Group requested identities by the PartitionKeyRange owning their
// effective partition key.
Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
itemIdentityList
.forEach(itemIdentity -> {
// For hierarchical (MultiHash) partition keys every level must be
// supplied; a partial key cannot be routed to a single range here.
if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
.getComponents().size() != pkDefinition.getPaths().size()) {
throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
}
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(
itemIdentity.getPartitionKey()),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
if (partitionRangeItemKeyMap.get(range) == null) {
List<CosmosItemIdentity> list = new ArrayList<>();
list.add(itemIdentity);
partitionRangeItemKeyMap.put(range, list);
} else {
List<CosmosItemIdentity> pairs =
partitionRangeItemKeyMap.get(range);
pairs.add(itemIdentity);
partitionRangeItemKeyMap.put(range, pairs);
}
});
// Ranges holding more than one identity get a generated query;
// single-identity ranges are omitted from this map and served by
// the point-read flux below.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());
Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
diagnosticsFactory,
partitionRangeItemKeyMap,
resourceLink,
state.getQueryOptions(),
klass);
Flux<FeedResponse<Document>> queries = queryForReadMany(
diagnosticsFactory,
resourceLink,
new SqlQuerySpec(DUMMY_SQL_QUERY),
state.getQueryOptions(),
Document.class,
ResourceType.Document,
collection,
Collections.unmodifiableMap(rangeQueryMap));
// Merge both sub-streams and fold every page into one FeedResponse
// with aggregated query metrics, request charge and diagnostics.
return Flux.merge(pointReads, queries)
.collectList()
.map(feedList -> {
List<T> finalList = new ArrayList<>();
HashMap<String, String> headers = new HashMap<>();
ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
double requestCharge = 0;
for (FeedResponse<Document> page : feedList) {
ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
ModelBridgeInternal.queryMetrics(page);
if (pageQueryMetrics != null) {
pageQueryMetrics.forEach(
aggregatedQueryMetrics::putIfAbsent);
}
requestCharge += page.getRequestCharge();
finalList.addAll(page.getResults().stream().map(document ->
ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
}
CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
aggregatedDiagnostics, aggregateRequestStatistics);
// Record the overall operation (status 200) against the merged
// diagnostics context so telemetry sees one readMany operation.
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
200,
0,
finalList.size(),
requestCharge,
aggregatedDiagnostics,
null
);
diagnosticsAccessor
.setDiagnosticsContext(
aggregatedDiagnostics,
ctx);
}
headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
.toString(requestCharge));
FeedResponse<T> frp = BridgeInternal
.createFeedResponseWithQueryMetrics(
finalList,
headers,
aggregatedQueryMetrics,
null,
false,
false,
aggregatedDiagnostics);
return frp;
});
})
// On failure, still record the failed operation (status/sub-status and
// charge from the CosmosException) into the diagnostics context.
.onErrorMap(throwable -> {
if (throwable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException)throwable;
CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
if (diagnostics != null) {
state.mergeDiagnosticsContext();
CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
if (ctx != null) {
ctxAccessor.recordOperation(
ctx,
cosmosException.getStatusCode(),
cosmosException.getSubStatusCode(),
0,
cosmosException.getRequestCharge(),
diagnostics,
throwable
);
diagnosticsAccessor
.setDiagnosticsContext(
diagnostics,
state.getDiagnosticsContextSnapshot());
}
}
return cosmosException;
}
return throwable;
});
}
);
}
/**
 * Builds, per physical partition range, the query spec that fetches all
 * requested identities on that range. Ranges holding a single identity are
 * skipped — those are served via point reads instead.
 */
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {
    final Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    final String partitionKeySelector = createPkSelector(partitionKeyDefinition);
    partitionRangeItemKeyMap.forEach((range, identities) -> {
        if (identities.size() <= 1) {
            return; // single-item range -> point read path
        }
        final SqlQuerySpec querySpec;
        if ("[\"id\"]".equals(partitionKeySelector)) {
            // Partition key IS the id: a simple IN-list over c.id suffices.
            querySpec = createReadManyQuerySpecPartitionKeyIdSame(identities, partitionKeySelector);
        } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
            querySpec = createReadManyQuerySpecMultiHash(identities, partitionKeyDefinition);
        } else {
            querySpec = createReadManyQuerySpec(identities, partitionKeySelector);
        }
        rangeQueryMap.put(range, querySpec);
    });
    return rangeQueryMap;
}
/**
 * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for the case where
 * the partition key path is the id itself, so matching on id alone is enough.
 * The {@code partitionKeySelector} parameter is unused here but kept for
 * signature parity with the other spec builders.
 */
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE c.id IN ( ");
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        if (i > 0) {
            query.append(", ");
        }
        final String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idPartitionKeyPairList.get(i).getId()));
        query.append(idParamName);
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds an OR-of-conjunctions query matching each requested (id, pk) pair:
 * "SELECT * FROM c WHERE ( (c.id = @p1 AND  c[pk] = @p0 ) OR ... )".
 * Parameters are numbered pairwise: @param(2i) = partition key, @param(2i+1) = id.
 */
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < itemIdentities.size(); i++) {
        if (i > 0) {
            query.append(" OR ");
        }
        final CosmosItemIdentity identity = itemIdentities.get(i);
        final String pkParamName = "@param" + (2 * i);
        final String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(
            pkParamName, ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey())));
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query
            .append("(")
            .append("c.id = ")
            .append(idParamName)
            .append(" AND ")
            .append(" c")
            .append(partitionKeySelector)
            .append(" = ")
            .append(pkParamName)
            .append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Builds the read-many query for a hierarchical (MultiHash) partition key.
 * The partition key value arrives as a single string whose sub-values are
 * separated by '='; each sub-value maps positionally onto the corresponding
 * partition key path. Per item: one parameter per sub-key, then one for the id.
 */
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {
    final List<SqlParameter> parameters = new ArrayList<>();
    final StringBuilder query = new StringBuilder("SELECT * FROM c WHERE ( ");
    final List<String> paths = partitionKeyDefinition.getPaths();
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        if (i > 0) {
            query.append(" OR ");
        }
        final CosmosItemIdentity identity = itemIdentities.get(i);
        final String pkValueString =
            (String) ModelBridgeInternal.getPartitionKeyObject(identity.getPartitionKey());
        // Pair each sub-key value with its path, allocating @paramN names in order.
        final List<List<String>> pkPathAndParam = new ArrayList<>();
        final String[] subValues = pkValueString.split("=");
        for (int pathIndex = 0; pathIndex < subValues.length; pathIndex++) {
            final String pkParamName = "@param" + paramCount;
            paramCount++;
            pkPathAndParam.add(Arrays.asList(paths.get(pathIndex), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subValues[pathIndex]));
        }
        final String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, identity.getId()));
        query.append("(").append("c.id = ").append(idParamName);
        for (List<String> pathAndParam : pkPathAndParam) {
            query
                .append(" AND ")
                .append(" c.")
                .append(pathAndParam.get(0).substring(1)) // drop leading '/' of the path
                .append(" = ")
                .append(pathAndParam.get(1));
        }
        query.append(" )");
    }
    query.append(" )");
    return new SqlQuerySpec(query.toString(), parameters);
}
/**
 * Converts the partition key paths (e.g. "/pk") into the bracketed selector
 * appended to "c" in generated queries (e.g. ["pk"], concatenated for
 * hierarchical keys). Each path's leading character is dropped and embedded
 * quotes are replaced, mirroring the original stream pipeline.
 */
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    final StringBuilder selector = new StringBuilder();
    for (String pathPart : partitionKeyDefinition.getPaths()) {
        final String withoutSlash = StringUtils.substring(pathPart, 1);
        final String escaped = StringUtils.replace(withoutSlash, "\"", "\\");
        selector.append("[\"").append(escaped).append("\"]");
    }
    return selector.toString();
}
/**
 * Executes the per-partition-range read-many queries. Returns an empty flux
 * when every requested item is already served by a point read (empty map).
 */
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {
    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }
    final UUID correlationActivityId = randomUuid();
    final AtomicBoolean queryCancelledOnTimeout = new AtomicBoolean(false);
    final IDocumentQueryClient queryClient =
        documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));
    return DocumentQueryExecutionContextFactory
        .createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            correlationActivityId,
            klass,
            resourceTypeEnum,
            queryCancelledOnTimeout)
        .flatMap(IDocumentQueryExecutionContext<T>::executeAsync);
}
// Serves the single-item partition ranges of a readMany call via point reads.
// Each successful read — or a 404/0 "not found" — is converted into a
// one-page FeedResponse so it can be merged with the query results.
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
ScopedDiagnosticsFactory diagnosticsFactory,
Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
String resourceLink,
CosmosQueryRequestOptions queryRequestOptions,
Class<T> klass) {
return Flux.fromIterable(singleItemPartitionRequestMap.values())
.flatMap(cosmosItemIdentityList -> {
// Only ranges holding exactly one identity are point-read; multi-item
// ranges are handled by queryForReadMany and skipped here.
if (cosmosItemIdentityList.size() == 1) {
CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
RequestOptions requestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.toRequestOptions(queryRequestOptions);
requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
// NOTE(review): resourceLink + id concatenation assumes resourceLink
// ends with a path separator — confirm against parentResourceLinkToQueryLink.
return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
.flatMap(resourceResponse -> Mono.just(
new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
))
.onErrorResume(throwable -> {
Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
if (unwrappedThrowable instanceof CosmosException) {
CosmosException cosmosException = (CosmosException) unwrappedThrowable;
int statusCode = cosmosException.getStatusCode();
int subStatusCode = cosmosException.getSubStatusCode();
// A plain 404 means "item absent" — not an error for readMany;
// carry the exception through the pair so diagnostics survive.
if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
}
}
return Mono.error(unwrappedThrowable);
});
}
return Mono.empty();
})
.flatMap(resourceResponseToExceptionPair -> {
ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
FeedResponse<Document> feedResponse;
if (cosmosException != null) {
// Absent item: empty page, but keep the request statistics from the 404.
feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
} else {
// Found item: wrap the single result as a one-element feed page.
CosmosItemResponse<T> cosmosItemResponse =
ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
feedResponse = ModelBridgeInternal.createFeedResponse(
Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
cosmosItemResponse.getResponseHeaders());
diagnosticsAccessor.addClientSideDiagnosticsToFeed(
feedResponse.getCosmosDiagnostics(),
Collections.singleton(
BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
}
return Mono.just(feedResponse);
});
}
/**
 * Queries documents using raw query text; delegates to the
 * {@link SqlQuerySpec} overload after wrapping the string.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    final SqlQuerySpec querySpec = new SqlQuerySpec(query);
    return queryDocuments(collectionLink, querySpec, state, classOfT);
}
// Adapts this client into the IDocumentQueryClient interface used by the
// query pipeline. Most members delegate straight to the outer
// RxDocumentClientImpl; executeQueryAsync additionally notifies the optional
// operation listener around each request/response.
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
return new IDocumentQueryClient () {
@Override
public RxCollectionCache getCollectionCache() {
return RxDocumentClientImpl.this.collectionCache;
}
@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return RxDocumentClientImpl.this.partitionKeyRangeCache;
}
@Override
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
}
@Override
public ConsistencyLevel getDefaultConsistencyLevelAsync() {
return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}
@Override
public ConsistencyLevel getDesiredConsistencyLevelAsync() {
return RxDocumentClientImpl.this.consistencyLevel;
}
@Override
public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
// Without a listener this is a plain single-shot query; with one, the
// correlated activity id is stamped on the request and the listener is
// notified on request, response and error.
if (operationContextAndListenerTuple == null) {
return RxDocumentClientImpl.this.query(request).single();
} else {
final OperationListener listener =
operationContextAndListenerTuple.getOperationListener();
final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
listener.requestListener(operationContext, request);
return RxDocumentClientImpl.this.query(request).single().doOnNext(
response -> listener.responseListener(operationContext, response)
).doOnError(
ex -> listener.exceptionListener(operationContext, ex)
);
}
}
@Override
public QueryCompatibilityMode getQueryCompatibilityMode() {
return QueryCompatibilityMode.Default;
}
@Override
public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {
return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
resourceType,
operationType,
retryPolicyFactory,
req,
feedOperation
);
}
// Not supported by this adapter; callers are expected to use
// executeQueryAsync instead. NOTE(review): returning null rather than
// throwing looks intentional — confirm no caller invokes this.
@Override
public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
return null;
}
};
}
/**
 * Queries documents with a parameterized {@link SqlQuerySpec}; the spec is
 * logged via SqlQuerySpecLogger before execution.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return this.createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}
/**
 * Executes a change feed query against the given collection using its
 * alt-link and resource id for addressing.
 *
 * @throws NullPointerException when collection is null.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {
    checkNotNull(collection, "Argument 'collection' must not be null.");
    final ChangeFeedQueryImpl<T> changeFeedQuery = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);
    return changeFeedQuery.executeAsync();
}
/**
 * Paged-flux entry point for change feed: unwraps the options held by the
 * operation state and delegates to {@link #queryDocumentChangeFeed}.
 */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    final CosmosChangeFeedRequestOptions changeFeedOptions = state.getChangeFeedOptions();
    return this.queryDocumentChangeFeed(collection, changeFeedOptions, classOfT);
}
// Reads every document in one logical partition by issuing a
// partition-scoped scan query against the single physical partition range
// that owns the partition key. Supports the threshold-based availability
// strategy (speculative execution across regions) when configured.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
String collectionLink,
PartitionKey partitionKey,
QueryFeedOperationState state,
Class<T> classOfT) {
if (StringUtils.isEmpty(collectionLink)) {
throw new IllegalArgumentException("collectionLink");
}
if (partitionKey == null) {
throw new IllegalArgumentException("partitionKey");
}
// Clone options so mutations (partition-key-range id below) don't leak
// into the caller-owned state.
final CosmosQueryRequestOptions effectiveOptions =
qryOptAccessor.clone(state.getQueryOptions());
RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
ResourceType.Document,
OperationType.Query,
false,
nonNullRequestOptions);
// With fewer than two applicable regions there is no speculation; use this
// client directly. Otherwise route diagnostics through a scoped factory so
// speculative attempts can be merged into one context.
DiagnosticsClientContext effectiveClientContext;
ScopedDiagnosticsFactory diagnosticsFactory;
if (orderedApplicableRegionsForSpeculation.size() < 2) {
effectiveClientContext = this;
diagnosticsFactory = null;
} else {
diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
state.registerDiagnosticsFactory(
() -> diagnosticsFactory.reset(),
(ctx) -> diagnosticsFactory.merge(ctx));
effectiveClientContext = diagnosticsFactory;
}
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
effectiveClientContext,
OperationType.Query,
ResourceType.Document,
collectionLink,
null
);
Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync(null, request).flux();
return collectionObs.flatMap(documentCollectionResourceResponse -> {
DocumentCollection collection = documentCollectionResourceResponse.v;
if (collection == null) {
return Mono.error(new IllegalStateException("Collection cannot be null"));
}
PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
String pkSelector = createPkSelector(pkDefinition);
// Build the scan query filtered to the requested logical partition.
SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);
String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
UUID activityId = randomUuid();
final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));
// Retries collection-resolution when the partition has gone (split/merge).
InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
this.collectionCache,
null,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));
Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
() -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
.tryLookupAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collection.getResourceId(),
null,
null).flux();
return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
if (routingMap == null) {
return Mono.error(new IllegalStateException("Failed to get routing map."));
}
// Resolve the one physical range owning this logical partition and
// pin the query to it via the partition-key-range id.
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
BridgeInternal.getPartitionKeyInternal(partitionKey),
pkDefinition);
PartitionKeyRange range =
routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
return createQueryInternal(
effectiveClientContext,
resourceLink,
querySpec,
ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
classOfT,
ResourceType.Document,
queryClient,
activityId,
isQueryCancelledOnTimeout);
});
},
invalidPartitionExceptionRetryPolicy);
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return innerFlux;
}
// Speculation path: fold scoped diagnostics back into the request options
// on every page, error, and cancellation.
return innerFlux
.flatMap(result -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return Mono.just(result);
})
.onErrorMap(throwable -> {
diagnosticsFactory.merge(nonNullRequestOptions);
return throwable;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
});
}
// Exposes the client-wide cache of partitioned query execution plans keyed by
// query text, allowing the query pipeline to skip repeated plan-gateway calls.
@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
return queryPlanCache;
}
/**
 * Reads the partition key range feed of a collection using the feed
 * operation state for options/diagnostics.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Reads the partition key range feed of a collection using plain query
 * request options.
 */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    final String feedPath = Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT);
    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, feedPath);
}
/**
 * Validates inputs and builds the service request for a stored procedure
 * operation under the given collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }
    validateResource(storedProcedure);
    final String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    final Map<String, String> headers =
        this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(
        this, operationType, ResourceType.StoredProcedure, path, storedProcedure, headers, options);
}
/**
 * Builds a service request targeting the UDF path of the given collection.
 * Validates both arguments before constructing the request.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }
    validateResource(udf);
    Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    String resourcePath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
}
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    // Run the internal create under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, retryPolicy),
        retryPolicy);
}
/** Creates a stored procedure under the given collection; errors surface through the Mono. */
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
    StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest serviceRequest =
            getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create);
        // Let the retry policy observe the request before dispatch.
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    // Run the internal replace under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, retryPolicy),
        retryPolicy);
}
/** Replaces a stored procedure addressed via its self link; errors surface through the Mono. */
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());
        RxDocumentClientImpl.validateResource(storedProcedure);
        // Address the existing resource through its self link.
        Map<String, String> headers = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        String resourcePath = Utils.joinPath(storedProcedure.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, resourcePath, storedProcedure, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    // Run the internal delete under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/** Deletes the stored procedure addressed by the given link; errors surface through the Mono. */
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    // Run the internal read under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicy),
        retryPolicy);
}
/** Reads the stored procedure addressed by the given link; errors surface through the Mono. */
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }
        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        String resourcePath = Utils.joinPath(storedProcedureLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's stored-procedures path.
    String feedPath = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, feedPath);
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec and delegates.
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
QueryFeedOperationState state) {
return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
}
@Override
// Delegates to the shared query pipeline for the StoredProcedure resource type.
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
SqlQuerySpec querySpec, QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    // Run the internal execution under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, retryPolicy),
        retryPolicy);
}
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    // Run the internal batch execution under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, retryPolicy, disableAutomaticIdGeneration),
        retryPolicy);
}
/**
 * Executes a server-side stored procedure and maps the raw response to a
 * StoredProcedureResponse, capturing the returned session token on the way out.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
try {
logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
String path = Utils.joinPath(storedProcedureLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
// Sproc execution responds with JSON; make that explicit in the Accept header.
requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.ExecuteJavaScript,
ResourceType.StoredProcedure, path,
// Parameters are serialized into the request body; empty body when none are supplied.
procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
requestHeaders, options);
if (options != null) {
request.requestContext.setExcludeRegions(options.getExcludeRegions());
}
if (retryPolicy != null) {
retryPolicy.onBeforeSendRequest(request);
}
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda dispatches the outer 'request' rather than 'req';
// this presumes addPartitionKeyInformation mutates and returns the same instance — confirm.
return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
.map(response -> {
this.captureSessionToken(request, response);
return toStoredProcedureResponse(response);
}));
} catch (Exception e) {
logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
/** Builds, dispatches, and parses a transactional batch request; errors surface through the Mono. */
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
        // Build the request, dispatch it, then parse the batch response in one pipeline.
        return getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration)
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    // Run the internal create under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink, trigger, options, retryPolicy),
        retryPolicy);
}
/** Creates a trigger under the given collection; errors surface through the Mono. */
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink,
            trigger.getId());
        RxDocumentServiceRequest serviceRequest =
            getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a service request targeting the triggers path of the given collection.
 * Validates both arguments before constructing the request.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }
    RxDocumentClientImpl.validateResource(trigger);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, operationType);
    String resourcePath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, resourcePath,
        trigger, headers, options);
}
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    // Run the internal replace under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicy),
        retryPolicy);
}
/** Replaces a trigger addressed via its self link; errors surface through the Mono. */
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);
        // Address the existing resource through its self link.
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        String resourcePath = Utils.joinPath(trigger.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, resourcePath, trigger, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    // Run the internal delete under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/** Deletes the trigger addressed by the given link; errors surface through the Mono. */
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        String resourcePath = Utils.joinPath(triggerLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    // Run the internal read under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicy),
        retryPolicy);
}
/** Reads the trigger addressed by the given link; errors surface through the Mono. */
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        Map<String, String> headers = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        String resourcePath = Utils.joinPath(triggerLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's triggers path.
    String feedPath = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, feedPath);
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec and delegates.
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
QueryFeedOperationState state) {
return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}
@Override
// Delegates to the shared query pipeline for the Trigger resource type.
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    // Run the internal create under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicy),
        retryPolicy);
}
/** Creates a UDF under the given collection; errors surface through the Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
    UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink,
            udf.getId());
        RxDocumentServiceRequest serviceRequest =
            getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.create(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    // Run the internal replace under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicy),
        retryPolicy);
}
/** Replaces a UDF addressed via its self link; errors surface through the Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);
        // Address the existing resource through its self link.
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        String resourcePath = Utils.joinPath(udf.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, resourcePath, udf, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    // Run the internal delete under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/** Deletes the UDF addressed by the given link; errors surface through the Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        String resourcePath = Utils.joinPath(udfLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.delete(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    // Run the internal read under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicy),
        retryPolicy);
}
/** Reads the UDF addressed by the given link; errors surface through the Mono. */
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        Map<String, String> headers = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        String resourcePath = Utils.joinPath(udfLink, null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, resourcePath, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.read(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's UDF path.
    String feedPath = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, feedPath);
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec and delegates.
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
String query,
QueryFeedOperationState state) {
return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}
@Override
// Delegates to the shared query pipeline for the UserDefinedFunction resource type.
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
String collectionLink,
SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    // Run the internal read under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Reads the conflict addressed by the given link. Partition-key information is
 * resolved asynchronously before the request is dispatched.
 */
private Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda dispatches the outer 'request' rather than 'req';
// this presumes addPartitionKeyInformation mutates and returns the same instance — confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    // Feed-read over the collection's conflicts path.
    String feedPath = Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT);
    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, feedPath);
}
@Override
// Convenience overload: wraps the raw query text in a SqlQuerySpec and delegates.
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
QueryFeedOperationState state) {
return queryConflicts(collectionLink, new SqlQuerySpec(query), state);
}
@Override
// Delegates to the shared query pipeline for the Conflict resource type.
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) {
return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    // Run the internal delete under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicy),
        retryPolicy);
}
/**
 * Deletes the conflict addressed by the given link. Partition-key information is
 * resolved asynchronously before the request is dispatched.
 */
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(conflictLink)) {
throw new IllegalArgumentException("conflictLink");
}
logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
String path = Utils.joinPath(conflictLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
// NOTE(review): the lambda dispatches the outer 'request' rather than 'req';
// this presumes addPartitionKeyInformation mutates and returns the same instance — confirm.
return reqObs.flatMap(req -> {
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class));
});
} catch (Exception e) {
logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    // Run the internal create under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/**
 * Creates a user under the given database; errors surface through the Mono.
 */
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        // Consistency fix: every sibling *Internal method registers the request with the
        // retry policy before dispatch (see upsertUserInternal); this one previously skipped it,
        // so the policy never observed the outgoing request.
        if (documentClientRetryPolicy != null) {
            documentClientRetryPolicy.onBeforeSendRequest(request);
        }
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    // Run the internal upsert under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicy),
        retryPolicy);
}
/** Upserts a user under the given database; errors surface through the Mono. */
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest serviceRequest = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.upsert(serviceRequest, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
/**
 * Builds a service request targeting the users path of the given database.
 * Validates both arguments before constructing the request.
 */
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }
    RxDocumentClientImpl.validateResource(user);
    Map<String, String> headers = getRequestHeaders(options, ResourceType.User, operationType);
    String resourcePath = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    return RxDocumentServiceRequest.create(this,
        operationType, ResourceType.User, resourcePath, user, headers, options);
}
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    // Run the internal replace under a session-token-aware retry policy.
    final DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicy),
        retryPolicy);
}
/** Replaces a user addressed via its self link; errors surface through the Mono. */
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);
        // Address the existing resource through its self link.
        Map<String, String> headers = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
        String resourcePath = Utils.joinPath(user.getSelfLink(), null);
        RxDocumentServiceRequest serviceRequest = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, resourcePath, user, headers, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(serviceRequest);
        }
        return this.replace(serviceRequest, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}
// Consistency fix: every sibling public client method carries @Override; this one was missing it,
// so the compiler could not verify it still matches the interface signature.
@Override
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    // Run the internal delete under a session-token-aware retry policy.
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Deleting a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
logger.debug("Reading a User. userLink [{}]", userLink);
String path = Utils.joinPath(userLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.User, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
} catch (Exception e) {
logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.User, User.class,
Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}
    /** Queries users under a database link through the generic query pipeline. */
    @Override
    public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                                      QueryFeedOperationState state) {
        return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
    }
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(clientEncryptionKeyLink)) {
throw new IllegalArgumentException("clientEncryptionKeyLink");
}
logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
String path = Utils.joinPath(clientEncryptionKeyLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
OperationType operationType) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
String nameBasedLink,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey,
nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (clientEncryptionKey == null) {
throw new IllegalArgumentException("clientEncryptionKey");
}
logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId());
RxDocumentClientImpl.validateResource(clientEncryptionKey);
String path = Utils.joinPath(nameBasedLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey,
OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders,
options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
} catch (Exception e) {
logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys(
String databaseLink,
QueryFeedOperationState state) {
if (StringUtils.isEmpty(databaseLink)) {
throw new IllegalArgumentException("databaseLink");
}
return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class,
Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT));
}
    /** Queries client encryption keys under a database link through the generic query pipeline. */
    @Override
    public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys(
        String databaseLink,
        SqlQuerySpec querySpec,
        QueryFeedOperationState state) {
        return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey);
    }
@Override
public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null));
}
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Create);
return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission,
RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission,
RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId());
RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options,
OperationType.Upsert);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission,
RequestOptions options, OperationType operationType) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
if (permission == null) {
throw new IllegalArgumentException("permission");
}
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType);
return RxDocumentServiceRequest.create(this,
operationType, ResourceType.Permission, path, permission, requestHeaders, options);
}
@Override
public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (permission == null) {
throw new IllegalArgumentException("permission");
}
logger.debug("Replacing a Permission. permission id [{}]", permission.getId());
RxDocumentClientImpl.validateResource(permission);
String path = Utils.joinPath(permission.getSelfLink(), null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options,
DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Delete, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) {
try {
if (StringUtils.isEmpty(permissionLink)) {
throw new IllegalArgumentException("permissionLink");
}
logger.debug("Reading a Permission. permissionLink [{}]", permissionLink);
String path = Utils.joinPath(permissionLink, null);
Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Permission, path, requestHeaders, options);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class));
} catch (Exception e) {
logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) {
if (StringUtils.isEmpty(userLink)) {
throw new IllegalArgumentException("userLink");
}
return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class,
Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT));
}
@Override
public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query,
QueryFeedOperationState state) {
return queryPermissions(userLink, new SqlQuerySpec(query), state);
}
    /** Queries permissions under a user link through the generic query pipeline. */
    @Override
    public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec,
                                                           QueryFeedOperationState state) {
        return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission);
    }
@Override
public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy);
}
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
if (offer == null) {
throw new IllegalArgumentException("offer");
}
logger.debug("Replacing an Offer. offer id [{}]", offer.getId());
RxDocumentClientImpl.validateResource(offer);
String path = Utils.joinPath(offer.getSelfLink(), null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
ResourceType.Offer, path, offer, null, null);
return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
@Override
public Mono<ResourceResponse<Offer>> readOffer(String offerLink) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance);
}
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) {
try {
if (StringUtils.isEmpty(offerLink)) {
throw new IllegalArgumentException("offerLink");
}
logger.debug("Reading an Offer. offerLink [{}]", offerLink);
String path = Utils.joinPath(offerLink, null);
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null);
if (retryPolicyInstance != null) {
retryPolicyInstance.onBeforeSendRequest(request);
}
return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class));
} catch (Exception e) {
logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
    /** Feed-reads all offers visible to this account. */
    @Override
    public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) {
        return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class,
            Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null));
    }
    /** Adapter: unwraps the query options from the operation state and delegates. */
    private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
        QueryFeedOperationState state,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink) {
        return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink);
    }
    /**
     * Feed-reads a non-document resource type under a fresh session-token-reset
     * retry policy; the same policy instance drives both the request and the retries.
     */
    private <T> Flux<FeedResponse<T>> nonDocumentReadFeed(
        CosmosQueryRequestOptions options,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink) {
        DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy),
            retryPolicy);
    }
    /**
     * Core paginated ReadFeed loop for non-document resources.
     * <p>
     * Builds one request per page (propagating the continuation token and page size
     * as headers) and maps each transport response into a typed {@link FeedResponse}
     * page via the Paginator. Asserts this path is never used for documents.
     */
    private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal(
        CosmosQueryRequestOptions options,
        ResourceType resourceType,
        Class<T> klass,
        String resourceLink,
        DocumentClientRetryPolicy retryPolicy) {
        final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions();
        Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions);
        // -1 means "service default page size".
        int maxPageSize = maxItemCount != null ? maxItemCount : -1;
        assert(resourceType != ResourceType.Document);
        // Invoked once per page; continuationToken is null for the first page.
        BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
            Map<String, String> requestHeaders = new HashMap<>();
            if (continuationToken != null) {
                requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
            }
            requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
            retryPolicy.onBeforeSendRequest(request);
            return request;
        };
        // Executes one page request and converts the raw response into a typed page.
        Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc =
            request -> readFeed(request)
                .map(response -> toFeedResponsePage(
                    response,
                    ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .getItemFactoryMethod(nonNullOptions, klass),
                    klass));
        return Paginator
            .getPaginatedQueryResultAsObservable(
                nonNullOptions,
                createRequestFunc,
                executeFunc,
                maxPageSize);
    }
    /** Queries offers with a raw query string; delegates to the {@code SqlQuerySpec} overload. */
    @Override
    public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
        return queryOffers(new SqlQuerySpec(query), state);
    }
    /** Queries offers through the generic query pipeline; offers have no parent link. */
    @Override
    public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
    }
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy),
documentClientRetryPolicy);
}
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
try {
logger.debug("Getting Database Account");
RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
OperationType.Read,
ResourceType.DatabaseAccount, "",
(HashMap<String, String>) null,
null);
return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
} catch (Exception e) {
logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
return Mono.error(e);
}
}
    /** Returns the session container used for session-consistency token tracking. */
    public Object getSession() {
        return this.sessionContainer;
    }
    /** Replaces the session container; the argument must be a {@link SessionContainer}. */
    public void setSession(Object sessionContainer) {
        this.sessionContainer = (SessionContainer) sessionContainer;
    }
    /** Returns the client-side collection metadata cache. */
    @Override
    public RxClientCollectionCache getCollectionCache() {
        return this.collectionCache;
    }
    /** Returns the partition key range metadata cache. */
    @Override
    public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
        return partitionKeyRangeCache;
    }
    /** Returns the manager tracking regional endpoints for this account. */
    @Override
    public GlobalEndpointManager getGlobalEndpointManager() {
        return this.globalEndpointManager;
    }
    /** Builds a fresh AddressSelector over this client's resolver and configured protocol. */
    @Override
    public AddressSelector getAddressSelector() {
        return new AddressSelector(this.addressResolver, this.configs.getProtocol());
    }
    /**
     * Reads the database account metadata from a SPECIFIC regional endpoint
     * (bypassing endpoint selection) via the gateway, and updates the cached
     * multi-write flag from the returned account properties.
     * Deferred so the request is built per subscription.
     */
    public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
        return Flux.defer(() -> {
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
            return this.populateHeadersAsync(request, RequestVerb.GET)
                .flatMap(requestPopulated -> {
                    // Route to the explicitly requested regional endpoint.
                    requestPopulated.setEndpointOverride(endpoint);
                    return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                        String message = String.format("Failed to retrieve database account information. %s",
                            e.getCause() != null
                                ? e.getCause().toString()
                                : e.toString());
                        logger.warn(message);
                    }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                        // Multi-write is effective only if both the policy and the account allow it.
                        .doOnNext(databaseAccount ->
                            this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
                });
        });
    }
    /**
     * Chooses the store model (gateway vs direct) for a request.
     * <p>
     * Certain requests must be routed through gateway even when the client
     * connectivity mode is direct: explicit gateway-mode requests, offer /
     * client-encryption-key / partition-key-range metadata, non-execute script
     * operations, partition-key deletes, and master-resource writes
     * (database / user / collection / permission create, delete, replace, read
     * as enumerated below). Cross-partition queries without a partition key or
     * range identity also go to gateway so it can fan out.
     *
     * @param request the request to route
     * @return the gateway proxy or the direct store model
     */
    private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
        if (request.useGatewayMode) {
            return this.gatewayProxy;
        }
        ResourceType resourceType = request.getResourceType();
        OperationType operationType = request.getOperationType();
        // Metadata resource types always served by gateway.
        if (resourceType == ResourceType.Offer ||
            resourceType == ResourceType.ClientEncryptionKey ||
            resourceType.isScript() && operationType != OperationType.ExecuteJavaScript ||
            resourceType == ResourceType.PartitionKeyRange ||
            resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
            return this.gatewayProxy;
        }
        if (operationType == OperationType.Create
                || operationType == OperationType.Upsert) {
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection ||
                resourceType == ResourceType.Permission) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Delete) {
            if (resourceType == ResourceType.Database ||
                resourceType == ResourceType.User ||
                resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Replace) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else if (operationType == OperationType.Read) {
            if (resourceType == ResourceType.DocumentCollection) {
                return this.gatewayProxy;
            } else {
                return this.storeModel;
            }
        } else {
            // Queries/feeds on collection children without a target partition go
            // through gateway, which handles the cross-partition fan-out.
            if ((operationType == OperationType.Query ||
                operationType == OperationType.SqlQuery ||
                operationType == OperationType.ReadFeed) &&
                    Utils.isCollectionChild(request.getResourceType())) {
                if (request.getPartitionKeyRangeIdentity() == null &&
                        request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                    return this.gatewayProxy;
                }
            }
            return this.storeModel;
        }
    }
@Override
public void close() {
logger.info("Attempting to close client {}", this.clientId);
if (!closed.getAndSet(true)) {
activeClientsCnt.decrementAndGet();
logger.info("Shutting down ...");
logger.info("Closing Global Endpoint Manager ...");
LifeCycleUtils.closeQuietly(this.globalEndpointManager);
logger.info("Closing StoreClientFactory ...");
LifeCycleUtils.closeQuietly(this.storeClientFactory);
logger.info("Shutting down reactorHttpClient ...");
LifeCycleUtils.closeQuietly(this.reactorHttpClient);
logger.info("Shutting down CpuMonitor ...");
CpuMemoryMonitor.unregister(this);
if (this.throughputControlEnabled.get()) {
logger.info("Closing ThroughputControlStore ...");
this.throughputControlStore.close();
}
logger.info("Shutting down completed.");
} else {
logger.warn("Already shutdown!");
}
}
    /** Returns the deserializer used to materialize items from raw payloads. */
    @Override
    public ItemDeserializer getItemDeserializer() {
        return this.itemDeserializer;
    }
    /**
     * Registers a throughput control group. Lazily creates the shared
     * {@link ThroughputControlStore} on first enable (guarded by compareAndSet)
     * and wires it into the direct or gateway store model depending on the
     * connection mode. Synchronized so concurrent callers cannot race the
     * store creation.
     */
    @Override
    public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
        checkNotNull(group, "Throughput control group can not be null");
        if (this.throughputControlEnabled.compareAndSet(false, true)) {
            this.throughputControlStore =
                new ThroughputControlStore(
                    this.collectionCache,
                    this.connectionPolicy.getConnectionMode(),
                    this.partitionKeyRangeCache);
            if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
                this.storeModel.enableThroughputControl(throughputControlStore);
            } else {
                this.gatewayProxy.enableThroughputControl(throughputControlStore);
            }
        }
        this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
    }
    /** Delegates proactive connection warm-up and cache initialization to the store model. */
    @Override
    public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
        return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
    }
    /** Returns the account-level default consistency level from the gateway configuration. */
    @Override
    public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
        return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
    }
    /***
     * Configure fault injector provider.
     * <p>
     * Always wires the provider into the gateway proxy; additionally wires the
     * direct store model and address resolver when in DIRECT connection mode.
     *
     * @param injectorProvider the fault injector provider; must not be null.
     */
    @Override
    public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
        checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
            this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
            this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
        }
        this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    /** Notifies the store model that proactive warm-up has completed for these containers. */
    @Override
    public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
        this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
    }
    /** Notifies the store model that proactive warm-up has started for these containers. */
    @Override
    public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
        this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
    }
    /** Returns the credential this client was built with (master key or resource token). */
    @Override
    public String getMasterKeyOrResourceToken() {
        return this.masterKeyOrResourceToken;
    }
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
PartitionKey partitionKey,
String partitionKeySelector) {
StringBuilder queryStringBuilder = new StringBuilder();
List<SqlParameter> parameters = new ArrayList<>();
queryStringBuilder.append("SELECT * FROM c WHERE");
Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
String pkParamName = "@pkValue";
parameters.add(new SqlParameter(pkParamName, pkValue));
queryStringBuilder.append(" c");
queryStringBuilder.append(partitionKeySelector);
queryStringBuilder.append((" = "));
queryStringBuilder.append(pkParamName);
return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}
    /**
     * Resolves the feed ranges (one per physical partition) for a collection.
     * Wrapped in an InvalidPartitionExceptionRetryPolicy so a stale name cache
     * (signalled by toFeedRanges throwing InvalidPartitionException) triggers a
     * refresh-and-retry.
     */
    @Override
    public Mono<List<FeedRange>> getFeedRanges(String collectionLink) {
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            collectionLink,
            new HashMap<>());
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Query,
            ResourceType.Document,
            collectionLink,
            null);
        invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> getFeedRangesInternal(request, collectionLink),
            invalidPartitionExceptionRetryPolicy);
    }
    /**
     * Resolves the collection, then fetches all overlapping partition key ranges
     * (full range) and converts them to feed ranges.
     *
     * @throws IllegalArgumentException if the collection link is empty.
     */
    private Mono<List<FeedRange>> getFeedRangesInternal(RxDocumentServiceRequest request, String collectionLink) {
        logger.debug("getFeedRange collectionLink=[{}]", collectionLink);
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null,
            request);
        return collectionObs.flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }
            // forceRefresh=true so the range list reflects recent splits/merges.
            Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
                .tryGetOverlappingRangesAsync(
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, true, null);
            return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
        });
    }
private static List<FeedRange> toFeedRanges(
Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) {
final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
if (partitionKeyRangeList == null) {
request.forceNameCacheRefresh = true;
throw new InvalidPartitionException();
}
List<FeedRange> feedRanges = new ArrayList<>();
partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
return feedRanges;
}
// Represents a single partition by its effective partition key (EPK) range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    FeedRangeEpkImpl epkFeedRange = new FeedRangeEpkImpl(pkRange.toRange());
    return epkFeedRange;
}
/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    // ThreadLocalRandom avoids contention on a shared Random instance.
    ThreadLocalRandom random = ThreadLocalRandom.current();
    return randomUuid(random.nextLong(), random.nextLong());
}
// Builds a version-4 UUID from two raw longs by stamping the version and variant bits.
static UUID randomUuid(long msb, long lsb) {
    // Clear the version nibble and set version 4 (randomly generated).
    final long versionedMsb = (msb & 0xffffffffffff0fffL) | 0x0000000000004000L;
    // Clear the two variant bits and set the IETF variant (binary 10).
    final long variantLsb = (lsb & 0x3fffffffffffffffL) | 0x8000000000000000L;
    return new UUID(versionedMsb, variantLsb);
}
// Convenience overload: runs the point operation using this client itself as the
// diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled) {
return wrapPointOperationWithAvailabilityStrategy(
resourceType,
operationType,
callback,
initialRequestOptions,
idempotentWriteRetriesEnabled,
this
);
}
// Executes a document point operation, hedging it across regions when a threshold-based
// availability strategy applies. With fewer than two applicable regions the callback runs once;
// otherwise one Mono per region is raced via Mono.firstWithValue, where each secondary attempt
// is delay-subscribed by the strategy's threshold plus a per-attempt step. Diagnostics of all
// attempts are captured by a ScopedDiagnosticsFactory and merged into the request options'
// diagnostics context on completion, error, or cancellation.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
ResourceType resourceType,
OperationType operationType,
DocumentPointOperation callback,
RequestOptions initialRequestOptions,
boolean idempotentWriteRetriesEnabled,
DiagnosticsClientContext innerDiagnosticsFactory) {
checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
checkNotNull(operationType, "Argument 'operationType' must not be null.");
checkNotNull(callback, "Argument 'callback' must not be null.");
final RequestOptions nonNullRequestOptions =
initialRequestOptions != null ? initialRequestOptions : new RequestOptions();
checkArgument(
resourceType == ResourceType.Document,
"This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
idempotentWriteRetriesEnabled,
nonNullRequestOptions);
// Fast path: hedging not applicable - execute the operation directly.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
// Collects diagnostics from every hedged attempt for a single merge afterwards.
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
.forEach(region -> {
RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
if (monoList.isEmpty()) {
// Primary attempt: ANY CosmosException is converted into a value so it can win the
// race as a definitive outcome.
Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: pin the attempt to 'region' by excluding all other applicable
// regions; only NON-transient CosmosExceptions become values (transient errors let
// another attempt win).
clonedOptions.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
nonNullRequestOptions.getExcludeRegions(),
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
.map(NonTransientPointOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientPointOperationResult(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger subscription: threshold + step * (attempt index - 1).
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First value (success or wrapped non-transient error) wins the race.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
diagnosticsFactory.merge(nonNullRequestOptions);
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
// When no source emits a value, firstWithValue fails with a NoSuchElementException
// whose cause aggregates all inner errors - surface the first CosmosException found.
Throwable exception = Exceptions.unwrap(throwable);
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
diagnosticsFactory.merge(nonNullRequestOptions);
return cosmosException;
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
diagnosticsFactory.merge(nonNullRequestOptions);
return exception;
})
.doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}
// True when the throwable, once stripped of reactive wrappers, is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    return Exceptions.unwrap(t) instanceof CosmosException;
}
// True when the throwable is a CosmosException whose status/sub-status pair is classified as
// non-transient for hedging; any non-CosmosException is treated as transient here.
private static boolean isNonTransientCosmosException(Throwable t) {
    final Throwable unwrapped = Exceptions.unwrap(t);
    if (unwrapped instanceof CosmosException) {
        final CosmosException cosmosException = (CosmosException) unwrapped;
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }
    return false;
}
// Computes the exclusion list for one hedged attempt: the caller-provided exclusions plus every
// applicable region except the one this attempt targets (pinning the attempt to 'currentRegion').
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {

    final List<String> effectiveExcludedRegions = initialExcludedRegions == null
        ? new ArrayList<>()
        : new ArrayList<>(initialExcludedRegions);

    for (String applicableRegion : applicableRegions) {
        if (!applicableRegion.equals(currentRegion)) {
            effectiveExcludedRegions.add(applicableRegion);
        }
    }
    return effectiveExcludedRegions;
}
// Classifies an outcome as non-transient (definitive) for hedging purposes: successes, client
// operation timeouts, plain not-found results, and deterministic client errors all count.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    // Any success status is a definitive outcome.
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }

    final boolean isClientOperationTimeout =
        statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT
            && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT;

    final boolean isPlainNotFound =
        statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN;

    // Client errors that would fail identically regardless of the region serving the request.
    final boolean isDeterministicClientError =
        statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED;

    return isClientOperationTimeout || isPlainNotFound || isDeterministicClientError;
}
// Returns the supplied override, falling back to this client when none is given.
private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
    return clientContextOverride != null ? clientContextOverride : this;
}
/**
 * Returns the applicable endpoints ordered by preference list if any.
 *
 * @param operationType the operation type; decides between read and write endpoints
 * @param excludedRegions regions to exclude when resolving the applicable endpoints
 * @return the applicable endpoints ordered by preference list; an empty list for operations
 * that are neither read-only nor write
 */
private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
if (operationType.isReadOnlyOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
} else if (operationType.isWriteOperation()) {
return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
}
return EMPTY_ENDPOINT_LIST;
}
// Strips null entries from the given endpoint list IN PLACE and returns the same list;
// a null input is normalized to the shared immutable empty list.
private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
    if (orderedEffectiveEndpointsList == null) {
        return EMPTY_ENDPOINT_LIST;
    }
    // removeIf is linear; the previous index-based remove(i) loop was O(n^2) on ArrayList.
    // Like remove(i), it throws UnsupportedOperationException on an unmodifiable list.
    orderedEffectiveEndpointsList.removeIf(uri -> uri == null);
    return orderedEffectiveEndpointsList;
}
// Overload that reads the excluded regions from the request options.
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
RequestOptions options) {
return getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
isIdempotentWriteRetriesEnabled,
options.getExcludeRegions());
}
// Computes the ordered list of regions eligible for hedged (speculative) execution.
// Returns an empty list whenever hedging does not apply; callers treat fewer than two
// regions as "no hedging".
private List<String> getApplicableRegionsForSpeculation(
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
ResourceType resourceType,
OperationType operationType,
boolean isIdempotentWriteRetriesEnabled,
List<String> excludedRegions) {
// Hedging requires an enabled end-to-end latency policy...
if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
return EMPTY_REGION_LIST;
}
// ...applies only to document operations...
if (resourceType != ResourceType.Document) {
return EMPTY_REGION_LIST;
}
// ...and to writes only when idempotent write retries are enabled...
if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
return EMPTY_REGION_LIST;
}
// ...against an account that can use multiple write locations...
if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
return EMPTY_REGION_LIST;
}
// ...and only for the threshold-based availability strategy.
if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
return EMPTY_REGION_LIST;
}
List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);
// Normalize exclusions to lower case for the case-insensitive membership check below.
HashSet<String> normalizedExcludedRegions = new HashSet<>();
if (excludedRegions != null) {
excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
}
// Map each applicable endpoint back to its region name, preserving preference order.
// NOTE(review): assumes getRegionName never returns null for an applicable endpoint - confirm.
List<String> orderedRegionsForSpeculation = new ArrayList<>();
endpoints.forEach(uri -> {
String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
orderedRegionsForSpeculation.add(regionName);
}
});
return orderedRegionsForSpeculation;
}
// Executes a document feed operation, hedging it across regions when a threshold-based
// availability strategy applies (reads only - isIdempotentWriteRetriesEnabled is passed as
// false). Each hedged attempt runs against a clone of the request with all other applicable
// regions excluded; attempts are raced via Mono.firstWithValue with staggered subscriptions.
private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
final ResourceType resourceType,
final OperationType operationType,
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req,
final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
) {
checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
checkNotNull(req, "Argument 'req' must not be null.");
assert(resourceType == ResourceType.Document);
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
this.getEffectiveEndToEndOperationLatencyPolicyConfig(
req.requestContext.getEndToEndOperationLatencyPolicyConfig());
List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
endToEndPolicyConfig,
resourceType,
operationType,
false,
initialExcludedRegions
);
// Fast path: hedging not applicable - run the feed operation directly on the original request.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
return feedOperation.apply(retryPolicyFactory, req);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
(ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();
orderedApplicableRegionsForSpeculation
.forEach(region -> {
// Each attempt operates on its own clone so per-region exclusions don't leak.
RxDocumentServiceRequest clonedRequest = req.clone();
if (monoList.isEmpty()) {
// Primary attempt: ANY CosmosException is converted into a value so it can win
// the race as a definitive outcome.
Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
if (logger.isDebugEnabled()) {
monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
"STARTING to process {} operation in region '{}'",
operationType,
region)));
} else {
monoList.add(initialMonoAcrossAllRegions);
}
} else {
// Hedged attempt: pin to 'region' by excluding the other applicable regions;
// only NON-transient errors terminate the race.
clonedRequest.requestContext.setExcludeRegions(
getEffectiveExcludedRegionsForHedging(
initialExcludedRegions,
orderedApplicableRegionsForSpeculation,
region)
);
Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
feedOperation.apply(retryPolicyFactory, clonedRequest)
.map(NonTransientFeedOperationResult::new)
.onErrorResume(
RxDocumentClientImpl::isNonTransientCosmosException,
t -> Mono.just(
new NonTransientFeedOperationResult<>(
Utils.as(Exceptions.unwrap(t), CosmosException.class))));
// Stagger subscription: threshold + step * (attempt index - 1).
Duration delayForCrossRegionalRetry = (availabilityStrategy)
.getThreshold()
.plus((availabilityStrategy)
.getThresholdStep()
.multipliedBy(monoList.size() - 1));
if (logger.isDebugEnabled()) {
monoList.add(
regionalCrossRegionRetryMono
.doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
.delaySubscription(delayForCrossRegionalRetry));
} else {
monoList.add(
regionalCrossRegionRetryMono
.delaySubscription(delayForCrossRegionalRetry));
}
}
});
// First value (success or wrapped non-transient error) wins the race.
return Mono
.firstWithValue(monoList)
.flatMap(nonTransientResult -> {
if (nonTransientResult.isError()) {
return Mono.error(nonTransientResult.exception);
}
return Mono.just(nonTransientResult.response);
})
.onErrorMap(throwable -> {
// When no source emits a value, firstWithValue fails with a NoSuchElementException
// aggregating all inner errors - surface the first CosmosException found.
Throwable exception = Exceptions.unwrap(throwable);
if (exception instanceof NoSuchElementException) {
List<Throwable> innerThrowables = Exceptions
.unwrapMultiple(exception.getCause());
int index = 0;
for (Throwable innerThrowable : innerThrowables) {
Throwable innerException = Exceptions.unwrap(innerThrowable);
if (innerException instanceof CosmosException) {
return Utils.as(innerException, CosmosException.class);
} else if (innerException instanceof NoSuchElementException) {
logger.trace(
"Operation in {} completed with empty result because it was cancelled.",
orderedApplicableRegionsForSpeculation.get(index));
} else if (logger.isWarnEnabled()) {
String message = "Unexpected Non-CosmosException when processing operation in '"
+ orderedApplicableRegionsForSpeculation.get(index)
+ "'.";
logger.warn(
message,
innerException
);
}
index++;
}
}
return exception;
});
}
/**
 * Callback representing a single point-operation attempt. Invoked once per (possibly hedged)
 * regional attempt with the request options to use, the effective end-to-end latency policy,
 * and the diagnostics factory the attempt should record into.
 */
@FunctionalInterface
private interface DocumentPointOperation {
Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
}
/**
 * Outcome holder for a single hedged point-operation attempt: wraps either a successful
 * response or a non-transient {@code CosmosException} - exactly one of the two is non-null.
 */
private static class NonTransientPointOperationResult {
    private final ResourceResponse<Document> response;
    private final CosmosException exception;

    public NonTransientPointOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    public NonTransientPointOperationResult(ResourceResponse<Document> response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** True when this result wraps an exception rather than a response. */
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public ResourceResponse<Document> getResponse() {
        return this.response;
    }
}
/**
 * Outcome holder for a single hedged feed-operation attempt: wraps either a successful
 * response of type {@code T} or a non-transient {@code CosmosException} - exactly one is non-null.
 */
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.response = null;
        this.exception = exception;
    }

    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.response = response;
        this.exception = null;
    }

    /** True when this result wraps an exception rather than a response. */
    public boolean isError() {
        return this.exception != null;
    }

    public CosmosException getException() {
        return this.exception;
    }

    public T getResponse() {
        return this.response;
    }
}
// A DiagnosticsClientContext decorator that records every CosmosDiagnostics instance it creates
// so that, once a (possibly hedged) operation finishes, the diagnostics of all attempts can be
// merged into a single CosmosDiagnosticsContext. merge(...) is one-shot, guarded by a CAS on
// 'isMerged'; reset() re-arms the factory.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
private final AtomicBoolean isMerged = new AtomicBoolean(false);
private final DiagnosticsClientContext inner;
private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
private final boolean shouldCaptureAllFeedDiagnostics;
public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
checkNotNull(inner, "Argument 'inner' must not be null.");
this.inner = inner;
this.createdDiagnostics = new ConcurrentLinkedQueue<>();
this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
}
@Override
public DiagnosticsClientConfig getConfig() {
return inner.getConfig();
}
@Override
public CosmosDiagnostics createDiagnostics() {
// Delegate creation but remember the instance for a later merge.
CosmosDiagnostics diagnostics = inner.createDiagnostics();
createdDiagnostics.add(diagnostics);
return diagnostics;
}
@Override
public String getUserAgent() {
return inner.getUserAgent();
}
// Merge using the diagnostics context snapshot from the request options when available.
public void merge(RequestOptions requestOptions) {
CosmosDiagnosticsContext knownCtx = null;
if (requestOptions != null) {
CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
if (ctxSnapshot != null) {
knownCtx = requestOptions.getDiagnosticsContextSnapshot();
}
}
merge(knownCtx);
}
// Attaches every recorded, context-less, non-empty diagnostics instance to one target context.
// Only the first call has any effect; later calls return immediately.
public void merge(CosmosDiagnosticsContext knownCtx) {
if (!isMerged.compareAndSet(false, true)) {
return;
}
// Choose the target context: the caller-provided one, otherwise the first recorded
// diagnostics instance that already carries a context.
CosmosDiagnosticsContext ctx = null;
if (knownCtx != null) {
ctx = knownCtx;
} else {
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() != null) {
ctx = diagnostics.getDiagnosticsContext();
break;
}
}
}
// No context available anywhere - nothing to merge into.
if (ctx == null) {
return;
}
for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
// Mark feed diagnostics as already captured - presumably so the paged flux does not
// report them a second time; confirm against the paged-flux capture logic.
if (this.shouldCaptureAllFeedDiagnostics &&
diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
if (isCaptured != null) {
isCaptured.set(true);
}
}
ctxAccessor.addDiagnostics(ctx, diagnostics);
}
}
}
// Clears recorded diagnostics and re-arms the factory for another merge cycle.
public void reset() {
this.createdDiagnostics.clear();
this.isMerged.set(false);
}
}
} |
nit: Should we use the const OWNERSHIP_PATH here? (to align with the use of the const CHECKPOINT_PATH in `testListCheckpoint()`) | public void testListOwnership() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName";
final String consumerGroup = "$Default";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String ownershipPrefix = prefix + "/ownership/";
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
BlobItem blobItem = getOwnershipBlobItem("owner1", "etag", ownershipPrefix + "0");
BlobItem blobItem2 = getOwnershipBlobItem("owner1", "etag", prefix + "/0");
BlobItem blobItem3 = new BlobItem().setName(ownershipPrefix + "5");
BlobItem blobItem4 = getOwnershipBlobItem(null, "2", ownershipPrefix + "2");
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
BlobItem>(null, 200, null,
Arrays.asList(blobItem, blobItem2, blobItem3, blobItem4), null,
null)));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
final ListBlobsOptions argument = invocation.getArgument(0);
final String arg = argument.getPrefix();
if (ownershipPrefix.equals(arg)) {
return response;
} else {
return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
}
});
StepVerifier.create(blobCheckpointStore.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroup))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("etag", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
})
.assertNext(partitionOwnership -> {
assertEquals("", partitionOwnership.getOwnerId());
assertEquals("2", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
}).verifyComplete();
} | final String ownershipPrefix = prefix + "/ownership/"; | public void testListOwnership() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName";
final String consumerGroup = "$Default";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String ownershipPrefix = prefix + OWNERSHIP_PATH;
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
BlobItem blobItem = getOwnershipBlobItem("owner1", "etag", ownershipPrefix + "0");
BlobItem blobItem2 = getOwnershipBlobItem("owner1", "etag", prefix + "/0");
BlobItem blobItem3 = new BlobItem().setName(ownershipPrefix + "5");
BlobItem blobItem4 = getOwnershipBlobItem(null, "2", ownershipPrefix + "2");
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
BlobItem>(null, 200, null,
Arrays.asList(blobItem, blobItem2, blobItem3, blobItem4), null,
null)));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
final ListBlobsOptions argument = invocation.getArgument(0);
final String arg = argument.getPrefix();
if (ownershipPrefix.equals(arg)) {
return response;
} else {
return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
}
});
StepVerifier.create(blobCheckpointStore.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroup))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("etag", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
})
.assertNext(partitionOwnership -> {
assertEquals("", partitionOwnership.getOwnerId());
assertEquals("2", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
}).verifyComplete();
} | class BlobCheckpointStoreTests {
@Mock
private BlobContainerAsyncClient blobContainerAsyncClient;
@Mock
private BlockBlobAsyncClient blockBlobAsyncClient;
@Mock
private BlobAsyncClient blobAsyncClient;
private AutoCloseable autoCloseable;
@BeforeEach
public void beforeEach() {
// Initialize the @Mock fields; the returned handle is closed in afterEach.
this.autoCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
// Release the mocks opened in beforeEach.
if (autoCloseable != null) {
autoCloseable.close();
}
// Clear inline mock state associated with this test instance.
Mockito.framework().clearInlineMock(this);
}
/**
* Tests that listing ownership works.
*/
@Test
/**
* Tests that errors are propagated with {@link CheckpointStore#listOwnership(String, String, String)}.
*/
@Test
public void testListOwnershipError() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// Simulate an I/O failure while enumerating ownership blobs.
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
// The failure must surface to the subscriber rather than being swallowed.
StepVerifier.create(blobCheckpointStore.listOwnership("ns", "eh", "cg"))
.expectError(SocketTimeoutException.class).verify();
}
/**
* Verifies that it lists checkpoints.
*/
@Test
public void testListCheckpoint() {
    final String fullyQualifiedNamespace = "namespace.microsoft.com";
    final String eventHubName = "MyEventHubName";
    final String consumerGroup = "$Default";
    final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
    final String checkpointPrefix = prefix + CHECKPOINT_PATH;
    final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);

    // One valid checkpoint blob, one with no metadata, and one outside the checkpoint path.
    final BlobItem blobItem = getCheckpointBlobItem("230", "1", checkpointPrefix + "0");
    final BlobItem blobItem2 = new BlobItem().setName(checkpointPrefix + "1");
    final BlobItem blobItem3 = getCheckpointBlobItem("233", "3", prefix + "1");
    final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
        new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
            Arrays.asList(blobItem, blobItem2, blobItem3), null, null)));

    // Answer only for the expected checkpoint prefix so listing with any other prefix fails the
    // test. (A second blanket when(...).thenReturn(response) stub previously overrode this
    // answer, turning the prefix verification into dead code - it has been removed.)
    when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
        final ListBlobsOptions listBlobsOptions = invocation.getArgument(0);
        final String arg = listBlobsOptions.getPrefix();
        if (checkpointPrefix.equals(arg)) {
            return response;
        } else {
            return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
        }
    });

    // Only the blob with checkpoint metadata should be surfaced.
    StepVerifier.create(blobCheckpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroup))
        .assertNext(checkpoint -> {
            assertEquals("0", checkpoint.getPartitionId());
            assertEquals(eventHubName, checkpoint.getEventHubName());
            assertEquals(consumerGroup, checkpoint.getConsumerGroup());
            assertEquals(1L, checkpoint.getSequenceNumber());
            assertEquals(230L, checkpoint.getOffset());
        }).verifyComplete();
}
/**
* Tests that errors are propagated with {@link CheckpointStore#listCheckpoints(String, String, String)}.
*/
@Test
public void testListCheckpointError() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// Simulate an I/O failure while enumerating checkpoint blobs.
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
// The failure must surface to the subscriber rather than being swallowed.
StepVerifier.create(blobCheckpointStore.listCheckpoints("ns", "eh", "cg"))
.expectError(SocketTimeoutException.class)
.verify();
}
/**
* Tests that can update checkpoint.
*/
@Test
public void testUpdateCheckpoint() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName2";
final String consumerGroup = "$DefaultOne";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String partitionId = "1";
final String blobName = prefix + CHECKPOINT_PATH + partitionId;
final Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace(fullyQualifiedNamespace)
.setEventHubName(eventHubName)
.setConsumerGroup(consumerGroup)
.setPartitionId(partitionId)
.setSequenceNumber(2L)
.setOffset(100L);
final BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
Collections.singletonList(blobItem), null, null)));
// The checkpoint blob already exists, so the update path sets metadata instead of uploading.
when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.empty());
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.verifyComplete();
}
/**
* Tests that errors are thrown if the checkpoint is invalid
*/
@Test
public void testUpdateCheckpointInvalid() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// Both a null checkpoint and a checkpoint without identifying fields are rejected eagerly
// (the exception is thrown at call time, not at subscription time).
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(null));
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(new Checkpoint()));
}
/**
* Tests that will update checkpoint if one does not exist.
*/
@Test
public void testUpdateCheckpointForNewPartition() {
    final Checkpoint checkpoint = new Checkpoint()
        .setFullyQualifiedNamespace("ns")
        .setEventHubName("eh")
        .setConsumerGroup("cg")
        .setPartitionId("0")
        .setSequenceNumber(2L)
        .setOffset(100L);
    final String legacyPrefix = getLegacyPrefix(checkpoint.getFullyQualifiedNamespace(),
        checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
    // Use the shared CHECKPOINT_PATH constant instead of the literal "/checkpoint/" to stay
    // consistent with testUpdateCheckpoint() and testListCheckpoint().
    final String blobName = legacyPrefix + CHECKPOINT_PATH + checkpoint.getPartitionId();
    HttpHeaders httpHeaders = new HttpHeaders();
    httpHeaders.add(HttpHeaderName.ETAG, "etag2");
    BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
    PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
        BlobItem>(null, 200, null,
        Collections.singletonList(blobItem), null,
        null)));
    // The target blob reports exists() == false, so the store must upload a brand new
    // checkpoint blob rather than updating metadata.
    when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
    when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
    when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
    when(blobAsyncClient.exists()).thenReturn(Mono.just(false));
    when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
        isNull(), anyMap(), isNull(), isNull(), isNull()))
        .thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
    BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
    StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint)).verifyComplete();
}
/**
* Tests claiming ownership on a partition that never had an entry.
*/
@Test
public void testClaimOwnership() {
PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "1", "owner1");
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "etag2");
// No pre-existing ownership blob: the claim should upload a new blob under the ownership path.
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/1")).thenReturn(blobAsyncClient);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
any(BlobRequestConditions.class)))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// The returned ownership must echo the claim and carry the ETag from the upload response.
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("1", partitionOwnership.getPartitionId());
assertEquals("eh", partitionOwnership.getEventHubName());
assertEquals("cg", partitionOwnership.getConsumerGroup());
assertEquals("etag2", partitionOwnership.getETag());
}).verifyComplete();
}
/**
* Tests claiming ownership on a previously owned partition.
*/
@Test
public void testClaimOwnershipExistingBlob() {
PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "0", "owner1");
po.setETag("1");
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "2");
// The ownership blob already exists, so the claim updates its metadata (conditioned on the
// prior ETag) instead of uploading a new blob.
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/0")).thenReturn(blobAsyncClient);
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// The returned ownership must carry the new ETag from the metadata response.
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals("eh", partitionOwnership.getEventHubName());
assertEquals("cg", partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
}).verifyComplete();
}
/**
 * Tests that a failed ownership claim returns normally instead of throwing exception downstream.
 *
 * <p>Covers three failure shapes: a conditional upload rejected with an ETag mismatch, a
 * conditional metadata update rejected the same way, and a container client that yields a
 * {@code null} blob client. In every case the resulting flux completes empty.</p>
 */
@Test
public void testClaimOwnershipFailed() {
final String namespace = "foo.servicebus.windows.net";
final String eventHubName = "test-event-hub";
final String consumerGroup = "test-cg";
final String partitionId = "0";
final String ownerId = "owner-id-1";
final PartitionOwnership po =
createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId, ownerId);
final String ownershipPath = getLegacyPrefix(namespace, eventHubName, consumerGroup)
+ OWNERSHIP_PATH + partitionId;
when(blobContainerAsyncClient.getBlobAsyncClient(ownershipPath)).thenReturn(blobAsyncClient);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
// Scenario 1: no ETag on the ownership -> create path; the upload fails with a 412-style error.
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
// Scenario 2: existing ETag -> metadata-update path; it also fails with an ETag mismatch.
final PartitionOwnership po2 = createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId,
ownerId)
.setETag("1");
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po2)))
.verifyComplete();
// Scenario 3: container client returns a null blob client; the claim is dropped, not thrown.
final BlobContainerAsyncClient anotherContainerClient = mock(BlobContainerAsyncClient.class);
final BlobCheckpointStore anotherCheckpointStore = new BlobCheckpointStore(anotherContainerClient);
when(anotherContainerClient.getBlobAsyncClient(anyString())).thenReturn(null);
StepVerifier.create(anotherCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
}
/**
 * Tests that an error is returned when {@link CheckpointStore#updateCheckpoint(Checkpoint)}
 * fails to persist the checkpoint metadata.
 */
@Test
public void testUpdateCheckpointError() {
Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace("ns")
.setEventHubName("eh")
.setConsumerGroup("cg")
.setPartitionId("0")
.setSequenceNumber(2L)
.setOffset(100L);
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/checkpoint/0")).thenReturn(blobAsyncClient);
// Blob exists -> update path; the metadata write fails and must propagate as-is.
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.error(new SocketTimeoutException()));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.expectError(SocketTimeoutException.class).verify();
}
/**
 * Builds a {@link PartitionOwnership} populated with the given identifiers.
 */
private static PartitionOwnership createPartitionOwnership(String fullyQualifiedNamespace, String eventHubName,
    String consumerGroupName, String partitionId, String ownerId) {
    final PartitionOwnership ownership = new PartitionOwnership();
    ownership.setFullyQualifiedNamespace(fullyQualifiedNamespace);
    ownership.setEventHubName(eventHubName);
    ownership.setConsumerGroup(consumerGroupName);
    ownership.setPartitionId(partitionId);
    ownership.setOwnerId(ownerId);
    return ownership;
}
/**
 * Creates an ownership {@link BlobItem}: the owner id is stored as metadata and the ETag and a
 * last-modified timestamp are stored on the item properties.
 */
private static BlobItem getOwnershipBlobItem(String owner, String etag, String blobName) {
    final Map<String, String> metadata = new HashMap<>();
    metadata.put(OWNER_ID, owner);
    final BlobItemProperties properties = new BlobItemProperties();
    properties.setLastModified(OffsetDateTime.now());
    properties.setETag(etag);
    final BlobItem item = new BlobItem();
    item.setName(blobName);
    item.setMetadata(metadata);
    item.setProperties(properties);
    return item;
}
/**
 * Creates a checkpoint {@link BlobItem} whose sequence number and offset live in blob metadata.
 */
private static BlobItem getCheckpointBlobItem(String offset, String sequenceNumber, String blobName) {
    final Map<String, String> metadata = new HashMap<>();
    metadata.put(SEQUENCE_NUMBER, sequenceNumber);
    metadata.put(OFFSET, offset);
    final BlobItem item = new BlobItem();
    item.setName(blobName);
    item.setMetadata(metadata);
    return item;
}
/**
 * Returns the legacy blob-name prefix: {@code <namespace>/<eventHub>/<consumerGroup>}.
 */
private static String getLegacyPrefix(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return fullyQualifiedNamespace + '/' + eventHubName + '/' + consumerGroup;
}
} | class BlobCheckpointStoreTests {
// Mocked container client handed to BlobCheckpointStore under test.
@Mock
private BlobContainerAsyncClient blobContainerAsyncClient;
// Mocked block-blob client used for the conditional upload paths.
@Mock
private BlockBlobAsyncClient blockBlobAsyncClient;
// Mocked blob client used for exists()/setMetadata paths.
@Mock
private BlobAsyncClient blobAsyncClient;
// Handle returned by MockitoAnnotations.openMocks; closed in afterEach.
private AutoCloseable autoCloseable;
@BeforeEach
public void beforeEach() {
// Initializes the @Mock fields and keeps the registration handle for cleanup.
this.autoCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
// Null check: openMocks may not have run if beforeEach failed part-way.
if (autoCloseable != null) {
autoCloseable.close();
}
// NOTE(review): clearInlineMock(Object) documents a *mock* parameter; passing the test
// instance looks suspicious — confirm against MockitoFramework docs (clearInlineMocks()
// is the variant that clears all inline mocks).
Mockito.framework().clearInlineMock(this);
}
/**
 * Tests that errors are propagated through
 * {@link CheckpointStore#listOwnership(String, String, String)} when listing blobs fails.
 */
// A stray javadoc + duplicate @Test annotation (left over from a removed test) was deleted
// here: JUnit 5's @Test is not repeatable, so two @Test annotations on one method do not
// compile. The truncated "{@link CheckpointStore" inline tag was also completed.
@Test
public void testListOwnershipError() {
    final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
    // Any listBlobs call times out; the store must surface the error downstream unchanged.
    final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
    when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
    StepVerifier.create(blobCheckpointStore.listOwnership("ns", "eh", "cg"))
        .expectError(SocketTimeoutException.class).verify();
}
/**
 * Verifies that it lists checkpoints: blobs without checkpoint metadata and blobs outside the
 * checkpoint prefix are both skipped, and only the well-formed checkpoint is emitted.
 */
@Test
public void testListCheckpoint() {
    final String fullyQualifiedNamespace = "namespace.microsoft.com";
    final String eventHubName = "MyEventHubName";
    final String consumerGroup = "$Default";
    final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
    final String checkpointPrefix = prefix + CHECKPOINT_PATH;
    final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
    // blobItem: valid checkpoint; blobItem2: no metadata (skipped); blobItem3: outside prefix.
    final BlobItem blobItem = getCheckpointBlobItem("230", "1", checkpointPrefix + "0");
    final BlobItem blobItem2 = new BlobItem().setName(checkpointPrefix + "1");
    final BlobItem blobItem3 = getCheckpointBlobItem("233", "3", prefix + "1");
    final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
        new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
            Arrays.asList(blobItem, blobItem2, blobItem3), null, null)));
    // Only the expected checkpoint prefix succeeds. The unconditional
    // when(...).thenReturn(response) stubbing that followed this answer was removed: in
    // Mockito a later stubbing of the same call overrides the earlier one, so it silently
    // disabled this prefix validation (compare testListOwnership, which is answer-only).
    when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
        final ListBlobsOptions listBlobsOptions = invocation.getArgument(0);
        final String arg = listBlobsOptions.getPrefix();
        if (checkpointPrefix.equals(arg)) {
            return response;
        } else {
            return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
        }
    });
    StepVerifier.create(blobCheckpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroup))
        .assertNext(checkpoint -> {
            assertEquals("0", checkpoint.getPartitionId());
            assertEquals(eventHubName, checkpoint.getEventHubName());
            assertEquals(consumerGroup, checkpoint.getConsumerGroup());
            assertEquals(1L, checkpoint.getSequenceNumber());
            assertEquals(230L, checkpoint.getOffset());
        }).verifyComplete();
}
/**
 * Tests that errors are propagated with
 * {@link CheckpointStore#listCheckpoints(String, String, String)} when listing blobs fails.
 */
@Test
public void testListCheckpointError() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// Any listBlobs call times out; the error must flow through unchanged.
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
StepVerifier.create(blobCheckpointStore.listCheckpoints("ns", "eh", "cg"))
.expectError(SocketTimeoutException.class)
.verify();
}
/**
 * Tests that can update checkpoint.
 *
 * <p>The checkpoint blob already exists, so the store takes the set-metadata path.</p>
 */
@Test
public void testUpdateCheckpoint() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName2";
final String consumerGroup = "$DefaultOne";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String partitionId = "1";
final String blobName = prefix + CHECKPOINT_PATH + partitionId;
final Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace(fullyQualifiedNamespace)
.setEventHubName(eventHubName)
.setConsumerGroup(consumerGroup)
.setPartitionId(partitionId)
.setSequenceNumber(2L)
.setOffset(100L);
final BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
Collections.singletonList(blobItem), null, null)));
when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
// NOTE(review): listBlobs does not look exercised by updateCheckpoint — harmless under
// lenient stubbing; confirm before removing.
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
// Blob exists -> metadata update (no upload); empty Mono signals success.
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.empty());
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.verifyComplete();
}
/**
 * Tests that errors are thrown if the checkpoint is invalid.
 *
 * <p>Both a {@code null} checkpoint and one with no identifying fields must be rejected
 * synchronously with {@link IllegalStateException}.</p>
 */
@Test
public void testUpdateCheckpointInvalid() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(null));
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(new Checkpoint()));
}
/**
 * Tests that will update checkpoint if one does not exist.
 *
 * <p>The checkpoint blob is absent ({@code exists() == false}), so the store creates it with a
 * zero-length upload carrying the checkpoint metadata.</p>
 */
@Test
public void testUpdateCheckpointForNewPartition() {
final Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace("ns")
.setEventHubName("eh")
.setConsumerGroup("cg")
.setPartitionId("0")
.setSequenceNumber(2L)
.setOffset(100L);
// Blob name derived from the shared constant rather than a hard-coded "/checkpoint/0".
final String legacyPrefix = getLegacyPrefix(checkpoint.getFullyQualifiedNamespace(),
checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
final String blobName = legacyPrefix + CHECKPOINT_PATH + checkpoint.getPartitionId();
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "etag2");
BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
BlobItem>(null, 200, null,
Collections.singletonList(blobItem), null,
null)));
when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
// Blob missing -> create path via zero-length upload with metadata.
when(blobAsyncClient.exists()).thenReturn(Mono.just(false));
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), anyMap(), isNull(), isNull(), isNull()))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint)).verifyComplete();
}
/**
 * Tests claiming ownership on a partition that never had an entry.
 *
 * <p>No ETag on the ownership means the blob is created via a conditional upload; the
 * service-assigned ETag must appear on the returned ownership.</p>
 */
@Test
public void testClaimOwnership() {
PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "1", "owner1");
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "etag2");
// Blob path convention: <namespace>/<eventHub>/<consumerGroup>/ownership/<partitionId>.
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/1")).thenReturn(blobAsyncClient);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
any(BlobRequestConditions.class)))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("1", partitionOwnership.getPartitionId());
assertEquals("eh", partitionOwnership.getEventHubName());
assertEquals("cg", partitionOwnership.getConsumerGroup());
assertEquals("etag2", partitionOwnership.getETag());
}).verifyComplete();
}
/**
 * Tests claiming ownership on a previously owned partition.
 *
 * <p>The ownership's existing ETag ("1") routes the claim through the conditional
 * set-metadata path; the new ETag ("2") must be reflected on the result.</p>
 */
@Test
public void testClaimOwnershipExistingBlob() {
PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "0", "owner1");
po.setETag("1");
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "2");
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/0")).thenReturn(blobAsyncClient);
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals("eh", partitionOwnership.getEventHubName());
assertEquals("cg", partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
}).verifyComplete();
}
/**
 * Tests that a failed ownership claim returns normally instead of throwing exception downstream.
 *
 * <p>Exercises an upload rejected on ETag mismatch, a metadata update rejected the same way,
 * and a {@code null} blob client; each claim flux completes empty.</p>
 */
@Test
public void testClaimOwnershipFailed() {
final String namespace = "foo.servicebus.windows.net";
final String eventHubName = "test-event-hub";
final String consumerGroup = "test-cg";
final String partitionId = "0";
final String ownerId = "owner-id-1";
final PartitionOwnership po =
createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId, ownerId);
final String ownershipPath = getLegacyPrefix(namespace, eventHubName, consumerGroup)
+ OWNERSHIP_PATH + partitionId;
when(blobContainerAsyncClient.getBlobAsyncClient(ownershipPath)).thenReturn(blobAsyncClient);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
// Create path fails with a 412-style conflict.
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
// Update path (ETag present) fails the same way.
final PartitionOwnership po2 = createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId,
ownerId)
.setETag("1");
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po2)))
.verifyComplete();
// Null blob client: the claim is skipped, not propagated as an error.
final BlobContainerAsyncClient anotherContainerClient = mock(BlobContainerAsyncClient.class);
final BlobCheckpointStore anotherCheckpointStore = new BlobCheckpointStore(anotherContainerClient);
when(anotherContainerClient.getBlobAsyncClient(anyString())).thenReturn(null);
StepVerifier.create(anotherCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
}
/**
 * Tests that an error is returned when {@link CheckpointStore#updateCheckpoint(Checkpoint)}
 * fails to persist the checkpoint metadata.
 */
@Test
public void testUpdateCheckpointError() {
Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace("ns")
.setEventHubName("eh")
.setConsumerGroup("cg")
.setPartitionId("0")
.setSequenceNumber(2L)
.setOffset(100L);
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/checkpoint/0")).thenReturn(blobAsyncClient);
// Blob exists -> update path; metadata write fails and must propagate as-is.
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.error(new SocketTimeoutException()));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.expectError(SocketTimeoutException.class).verify();
}
/**
 * Builds a {@link PartitionOwnership} carrying the supplied identifiers.
 */
private static PartitionOwnership createPartitionOwnership(String fullyQualifiedNamespace, String eventHubName,
    String consumerGroupName, String partitionId, String ownerId) {
    final PartitionOwnership result = new PartitionOwnership();
    result.setFullyQualifiedNamespace(fullyQualifiedNamespace);
    result.setEventHubName(eventHubName);
    result.setConsumerGroup(consumerGroupName);
    result.setPartitionId(partitionId);
    result.setOwnerId(ownerId);
    return result;
}
/**
 * Creates an ownership {@link BlobItem}: owner id in metadata, ETag and last-modified on the
 * item properties.
 */
private static BlobItem getOwnershipBlobItem(String owner, String etag, String blobName) {
    final Map<String, String> ownerMetadata = new HashMap<>();
    ownerMetadata.put(OWNER_ID, owner);
    final BlobItemProperties blobProperties = new BlobItemProperties();
    blobProperties.setLastModified(OffsetDateTime.now());
    blobProperties.setETag(etag);
    final BlobItem blobItem = new BlobItem();
    blobItem.setName(blobName);
    blobItem.setMetadata(ownerMetadata);
    blobItem.setProperties(blobProperties);
    return blobItem;
}
/**
 * Creates a checkpoint {@link BlobItem} with the sequence number and offset as blob metadata.
 */
private static BlobItem getCheckpointBlobItem(String offset, String sequenceNumber, String blobName) {
    final Map<String, String> checkpointMetadata = new HashMap<>();
    checkpointMetadata.put(SEQUENCE_NUMBER, sequenceNumber);
    checkpointMetadata.put(OFFSET, offset);
    final BlobItem blobItem = new BlobItem();
    blobItem.setName(blobName);
    blobItem.setMetadata(checkpointMetadata);
    return blobItem;
}
/**
 * Returns the legacy blob-name prefix: {@code <namespace>/<eventHub>/<consumerGroup>}.
 */
private static String getLegacyPrefix(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
    return String.format("%s/%s/%s", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
} |
nit; `final String blobName = legacyPrefix + CHECKPOINT_PATH + "0"` | public void testUpdateCheckpointForNewPartition() {
final Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace("ns")
.setEventHubName("eh")
.setConsumerGroup("cg")
.setPartitionId("0")
.setSequenceNumber(2L)
.setOffset(100L);
final String legacyPrefix = getLegacyPrefix(checkpoint.getFullyQualifiedNamespace(),
checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
final String blobName = legacyPrefix + "/checkpoint/0";
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "etag2");
BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
BlobItem>(null, 200, null,
Collections.singletonList(blobItem), null,
null)));
when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blobAsyncClient.exists()).thenReturn(Mono.just(false));
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), anyMap(), isNull(), isNull(), isNull()))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint)).verifyComplete();
} | final String blobName = legacyPrefix + "/checkpoint/0"; | public void testUpdateCheckpointForNewPartition() {
final Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace("ns")
.setEventHubName("eh")
.setConsumerGroup("cg")
.setPartitionId("0")
.setSequenceNumber(2L)
.setOffset(100L);
final String legacyPrefix = getLegacyPrefix(checkpoint.getFullyQualifiedNamespace(),
checkpoint.getEventHubName(), checkpoint.getConsumerGroup());
final String blobName = legacyPrefix + CHECKPOINT_PATH + checkpoint.getPartitionId();
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "etag2");
BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
PagedFlux<BlobItem> response = new PagedFlux<BlobItem>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
BlobItem>(null, 200, null,
Collections.singletonList(blobItem), null,
null)));
when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blobAsyncClient.exists()).thenReturn(Mono.just(false));
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), anyMap(), isNull(), isNull(), isNull()))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint)).verifyComplete();
} | class BlobCheckpointStoreTests {
// Mocked container client handed to BlobCheckpointStore under test.
@Mock
private BlobContainerAsyncClient blobContainerAsyncClient;
// Mocked block-blob client used for the conditional upload paths.
@Mock
private BlockBlobAsyncClient blockBlobAsyncClient;
// Mocked blob client used for exists()/setMetadata paths.
@Mock
private BlobAsyncClient blobAsyncClient;
// Handle returned by MockitoAnnotations.openMocks; closed in afterEach.
private AutoCloseable autoCloseable;
@BeforeEach
public void beforeEach() {
// Initializes the @Mock fields and keeps the registration handle for cleanup.
this.autoCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
// Null check: openMocks may not have run if beforeEach failed part-way.
if (autoCloseable != null) {
autoCloseable.close();
}
// NOTE(review): clearInlineMock(Object) documents a *mock* parameter; passing the test
// instance looks suspicious — confirm against MockitoFramework docs.
Mockito.framework().clearInlineMock(this);
}
/**
 * Tests that listing ownership works.
 *
 * <p>Only well-formed blobs directly under the ownership prefix are emitted; blobs outside the
 * prefix or without metadata are skipped, and a missing owner id maps to an empty string.</p>
 */
@Test
public void testListOwnership() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName";
final String consumerGroup = "$Default";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String ownershipPrefix = prefix + "/ownership/";
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// blobItem: valid; blobItem2: outside the ownership prefix (skipped); blobItem3: no
// metadata (skipped); blobItem4: null owner -> reported with an empty owner id.
BlobItem blobItem = getOwnershipBlobItem("owner1", "etag", ownershipPrefix + "0");
BlobItem blobItem2 = getOwnershipBlobItem("owner1", "etag", prefix + "/0");
BlobItem blobItem3 = new BlobItem().setName(ownershipPrefix + "5");
BlobItem blobItem4 = getOwnershipBlobItem(null, "2", ownershipPrefix + "2");
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
BlobItem>(null, 200, null,
Arrays.asList(blobItem, blobItem2, blobItem3, blobItem4), null,
null)));
// Only the expected ownership prefix yields results; anything else is a test failure.
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
final ListBlobsOptions argument = invocation.getArgument(0);
final String arg = argument.getPrefix();
if (ownershipPrefix.equals(arg)) {
return response;
} else {
return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
}
});
StepVerifier.create(blobCheckpointStore.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroup))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("etag", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
})
.assertNext(partitionOwnership -> {
assertEquals("", partitionOwnership.getOwnerId());
assertEquals("2", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
}).verifyComplete();
}
/**
 * Tests that errors are propagated with
 * {@link CheckpointStore#listOwnership(String, String, String)} when listing blobs fails.
 */
@Test
public void testListOwnershipError() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// Any listBlobs call times out; the error must flow through unchanged.
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
StepVerifier.create(blobCheckpointStore.listOwnership("ns", "eh", "cg"))
.expectError(SocketTimeoutException.class).verify();
}
/**
 * Verifies that it lists checkpoints: blobs without checkpoint metadata and blobs outside the
 * checkpoint prefix are both skipped, and only the well-formed checkpoint is emitted.
 */
@Test
public void testListCheckpoint() {
    final String fullyQualifiedNamespace = "namespace.microsoft.com";
    final String eventHubName = "MyEventHubName";
    final String consumerGroup = "$Default";
    final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
    final String checkpointPrefix = prefix + CHECKPOINT_PATH;
    final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
    // blobItem: valid checkpoint; blobItem2: no metadata (skipped); blobItem3: outside prefix.
    final BlobItem blobItem = getCheckpointBlobItem("230", "1", checkpointPrefix + "0");
    final BlobItem blobItem2 = new BlobItem().setName(checkpointPrefix + "1");
    final BlobItem blobItem3 = getCheckpointBlobItem("233", "3", prefix + "1");
    final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
        new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
            Arrays.asList(blobItem, blobItem2, blobItem3), null, null)));
    // Only the expected checkpoint prefix succeeds. The unconditional
    // when(...).thenReturn(response) stubbing that followed this answer was removed: in
    // Mockito a later stubbing of the same call overrides the earlier one, so it silently
    // disabled this prefix validation (compare testListOwnership, which is answer-only).
    when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
        final ListBlobsOptions listBlobsOptions = invocation.getArgument(0);
        final String arg = listBlobsOptions.getPrefix();
        if (checkpointPrefix.equals(arg)) {
            return response;
        } else {
            return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
        }
    });
    StepVerifier.create(blobCheckpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroup))
        .assertNext(checkpoint -> {
            assertEquals("0", checkpoint.getPartitionId());
            assertEquals(eventHubName, checkpoint.getEventHubName());
            assertEquals(consumerGroup, checkpoint.getConsumerGroup());
            assertEquals(1L, checkpoint.getSequenceNumber());
            assertEquals(230L, checkpoint.getOffset());
        }).verifyComplete();
}
/**
 * Tests that errors are propagated with
 * {@link CheckpointStore#listCheckpoints(String, String, String)} when listing blobs fails.
 */
@Test
public void testListCheckpointError() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
// Any listBlobs call times out; the error must flow through unchanged.
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
StepVerifier.create(blobCheckpointStore.listCheckpoints("ns", "eh", "cg"))
.expectError(SocketTimeoutException.class)
.verify();
}
/**
 * Tests that can update checkpoint.
 *
 * <p>The checkpoint blob already exists, so the store takes the set-metadata path.</p>
 */
@Test
public void testUpdateCheckpoint() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName2";
final String consumerGroup = "$DefaultOne";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String partitionId = "1";
final String blobName = prefix + CHECKPOINT_PATH + partitionId;
final Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace(fullyQualifiedNamespace)
.setEventHubName(eventHubName)
.setConsumerGroup(consumerGroup)
.setPartitionId(partitionId)
.setSequenceNumber(2L)
.setOffset(100L);
final BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
Collections.singletonList(blobItem), null, null)));
when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
// NOTE(review): listBlobs does not look exercised by updateCheckpoint — harmless under
// lenient stubbing; confirm before removing.
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
// Blob exists -> metadata update (no upload); empty Mono signals success.
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.empty());
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.verifyComplete();
}
/**
 * Tests that errors are thrown if the checkpoint is invalid.
 *
 * <p>Both a {@code null} checkpoint and one with no identifying fields must be rejected
 * synchronously with {@link IllegalStateException}.</p>
 */
@Test
public void testUpdateCheckpointInvalid() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(null));
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(new Checkpoint()));
}
/**
 * Tests claiming ownership on a partition that never had an entry.
 *
 * <p>No ETag on the ownership means the blob is created via a conditional upload; the
 * service-assigned ETag must appear on the returned ownership.</p>
 */
// A stray javadoc + duplicate @Test annotation (left over from an elided
// testUpdateCheckpointForNewPartition) was deleted here: JUnit 5's @Test is not repeatable,
// so two @Test annotations on one method do not compile.
@Test
public void testClaimOwnership() {
    PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "1", "owner1");
    HttpHeaders httpHeaders = new HttpHeaders();
    httpHeaders.add(HttpHeaderName.ETAG, "etag2");
    // Blob path convention: <namespace>/<eventHub>/<consumerGroup>/ownership/<partitionId>.
    when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/1")).thenReturn(blobAsyncClient);
    when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
    when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
        isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
        any(BlobRequestConditions.class)))
        .thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
    BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
    StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
        .assertNext(partitionOwnership -> {
            assertEquals("owner1", partitionOwnership.getOwnerId());
            assertEquals("1", partitionOwnership.getPartitionId());
            assertEquals("eh", partitionOwnership.getEventHubName());
            assertEquals("cg", partitionOwnership.getConsumerGroup());
            assertEquals("etag2", partitionOwnership.getETag());
        }).verifyComplete();
}
/**
 * Tests claiming ownership on a previously owned partition.
 *
 * <p>The ownership's existing ETag ("1") routes the claim through the conditional
 * set-metadata path; the new ETag ("2") must be reflected on the result.</p>
 */
@Test
public void testClaimOwnershipExistingBlob() {
PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "0", "owner1");
po.setETag("1");
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "2");
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/0")).thenReturn(blobAsyncClient);
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals("eh", partitionOwnership.getEventHubName());
assertEquals("cg", partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
}).verifyComplete();
}
/**
 * Tests that a failed ownership claim returns normally instead of throwing exception downstream.
 *
 * <p>Exercises an upload rejected on ETag mismatch, a metadata update rejected the same way,
 * and a {@code null} blob client; each claim flux completes empty.</p>
 */
@Test
public void testClaimOwnershipFailed() {
final String namespace = "foo.servicebus.windows.net";
final String eventHubName = "test-event-hub";
final String consumerGroup = "test-cg";
final String partitionId = "0";
final String ownerId = "owner-id-1";
final PartitionOwnership po =
createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId, ownerId);
final String ownershipPath = getLegacyPrefix(namespace, eventHubName, consumerGroup)
+ OWNERSHIP_PATH + partitionId;
when(blobContainerAsyncClient.getBlobAsyncClient(ownershipPath)).thenReturn(blobAsyncClient);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
// Create path fails with a 412-style conflict.
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
// Update path (ETag present) fails the same way.
final PartitionOwnership po2 = createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId,
ownerId)
.setETag("1");
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po2)))
.verifyComplete();
// Null blob client: the claim is skipped, not propagated as an error.
final BlobContainerAsyncClient anotherContainerClient = mock(BlobContainerAsyncClient.class);
final BlobCheckpointStore anotherCheckpointStore = new BlobCheckpointStore(anotherContainerClient);
when(anotherContainerClient.getBlobAsyncClient(anyString())).thenReturn(null);
StepVerifier.create(anotherCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
}
/**
* Tests that an error is returned when {@link CheckpointStore
*/
@Test
public void testUpdateCheckpointError() {
Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace("ns")
.setEventHubName("eh")
.setConsumerGroup("cg")
.setPartitionId("0")
.setSequenceNumber(2L)
.setOffset(100L);
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/checkpoint/0")).thenReturn(blobAsyncClient);
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.error(new SocketTimeoutException()));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.expectError(SocketTimeoutException.class).verify();
}
private static PartitionOwnership createPartitionOwnership(String fullyQualifiedNamespace, String eventHubName,
String consumerGroupName, String partitionId, String ownerId) {
return new PartitionOwnership()
.setFullyQualifiedNamespace(fullyQualifiedNamespace)
.setEventHubName(eventHubName)
.setConsumerGroup(consumerGroupName)
.setPartitionId(partitionId)
.setOwnerId(ownerId);
}
private static BlobItem getOwnershipBlobItem(String owner, String etag, String blobName) {
Map<String, String> metadata = new HashMap<>();
metadata.put(OWNER_ID, owner);
BlobItemProperties properties = new BlobItemProperties()
.setLastModified(OffsetDateTime.now())
.setETag(etag);
return new BlobItem()
.setName(blobName)
.setMetadata(metadata)
.setProperties(properties);
}
private static BlobItem getCheckpointBlobItem(String offset, String sequenceNumber, String blobName) {
Map<String, String> metadata = new HashMap<>();
metadata.put(SEQUENCE_NUMBER, sequenceNumber);
metadata.put(OFFSET, offset);
return new BlobItem()
.setName(blobName)
.setMetadata(metadata);
}
private static String getLegacyPrefix(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
} | class BlobCheckpointStoreTests {
@Mock
private BlobContainerAsyncClient blobContainerAsyncClient;
@Mock
private BlockBlobAsyncClient blockBlobAsyncClient;
@Mock
private BlobAsyncClient blobAsyncClient;
private AutoCloseable autoCloseable;
@BeforeEach
public void beforeEach() {
this.autoCloseable = MockitoAnnotations.openMocks(this);
}
@AfterEach
public void afterEach() throws Exception {
if (autoCloseable != null) {
autoCloseable.close();
}
Mockito.framework().clearInlineMock(this);
}
/**
* Tests that listing ownership works.
*/
@Test
public void testListOwnership() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName";
final String consumerGroup = "$Default";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String ownershipPrefix = prefix + OWNERSHIP_PATH;
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
BlobItem blobItem = getOwnershipBlobItem("owner1", "etag", ownershipPrefix + "0");
BlobItem blobItem2 = getOwnershipBlobItem("owner1", "etag", prefix + "/0");
BlobItem blobItem3 = new BlobItem().setName(ownershipPrefix + "5");
BlobItem blobItem4 = getOwnershipBlobItem(null, "2", ownershipPrefix + "2");
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(new PagedResponseBase<HttpHeaders,
BlobItem>(null, 200, null,
Arrays.asList(blobItem, blobItem2, blobItem3, blobItem4), null,
null)));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
final ListBlobsOptions argument = invocation.getArgument(0);
final String arg = argument.getPrefix();
if (ownershipPrefix.equals(arg)) {
return response;
} else {
return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
}
});
StepVerifier.create(blobCheckpointStore.listOwnership(fullyQualifiedNamespace, eventHubName, consumerGroup))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("etag", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
})
.assertNext(partitionOwnership -> {
assertEquals("", partitionOwnership.getOwnerId());
assertEquals("2", partitionOwnership.getPartitionId());
assertEquals(eventHubName, partitionOwnership.getEventHubName());
assertEquals(consumerGroup, partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
assertEquals(fullyQualifiedNamespace, partitionOwnership.getFullyQualifiedNamespace());
}).verifyComplete();
}
/**
* Tests that errors are propagated with {@link CheckpointStore
*/
@Test
public void testListOwnershipError() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
StepVerifier.create(blobCheckpointStore.listOwnership("ns", "eh", "cg"))
.expectError(SocketTimeoutException.class).verify();
}
/**
* Verifies that it lists checkpoints.
*/
@Test
public void testListCheckpoint() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName";
final String consumerGroup = "$Default";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String checkpointPrefix = prefix + CHECKPOINT_PATH;
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
final BlobItem blobItem = getCheckpointBlobItem("230", "1", checkpointPrefix + "0");
final BlobItem blobItem2 = new BlobItem().setName(checkpointPrefix + "1");
final BlobItem blobItem3 = getCheckpointBlobItem("233", "3", prefix + "1");
final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
Arrays.asList(blobItem, blobItem2, blobItem3), null, null)));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenAnswer(invocation -> {
final ListBlobsOptions listBlobsOptions = invocation.getArgument(0);
final String arg = listBlobsOptions.getPrefix();
if (checkpointPrefix.equals(arg)) {
return response;
} else {
return Flux.error(new IllegalArgumentException("Did not expect this prefix: " + arg));
}
});
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
StepVerifier.create(blobCheckpointStore.listCheckpoints(fullyQualifiedNamespace, eventHubName, consumerGroup))
.assertNext(checkpoint -> {
assertEquals("0", checkpoint.getPartitionId());
assertEquals(eventHubName, checkpoint.getEventHubName());
assertEquals(consumerGroup, checkpoint.getConsumerGroup());
assertEquals(1L, checkpoint.getSequenceNumber());
assertEquals(230L, checkpoint.getOffset());
}).verifyComplete();
}
/**
* Tests that errors are propagated with {@link CheckpointStore
*/
@Test
public void testListCheckpointError() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.error(new SocketTimeoutException()));
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
StepVerifier.create(blobCheckpointStore.listCheckpoints("ns", "eh", "cg"))
.expectError(SocketTimeoutException.class)
.verify();
}
/**
* Tests that can update checkpoint.
*/
@Test
public void testUpdateCheckpoint() {
final String fullyQualifiedNamespace = "namespace.microsoft.com";
final String eventHubName = "MyEventHubName2";
final String consumerGroup = "$DefaultOne";
final String prefix = getLegacyPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup);
final String partitionId = "1";
final String blobName = prefix + CHECKPOINT_PATH + partitionId;
final Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace(fullyQualifiedNamespace)
.setEventHubName(eventHubName)
.setConsumerGroup(consumerGroup)
.setPartitionId(partitionId)
.setSequenceNumber(2L)
.setOffset(100L);
final BlobItem blobItem = getCheckpointBlobItem("230", "1", blobName);
final PagedFlux<BlobItem> response = new PagedFlux<>(() -> Mono.just(
new PagedResponseBase<HttpHeaders, BlobItem>(null, 200, null,
Collections.singletonList(blobItem), null, null)));
when(blobContainerAsyncClient.getBlobAsyncClient(blobName)).thenReturn(blobAsyncClient);
when(blobContainerAsyncClient.listBlobs(any(ListBlobsOptions.class))).thenReturn(response);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.empty());
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.verifyComplete();
}
/**
* Tests that errors are thrown if the checkpoint is invalid
*/
@Test
public void testUpdateCheckpointInvalid() {
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(null));
assertThrows(IllegalStateException.class, () -> blobCheckpointStore.updateCheckpoint(new Checkpoint()));
}
/**
* Tests that will update checkpoint if one does not exist.
*/
@Test
/**
* Tests claiming ownership on a partition that never had an entry.
*/
@Test
public void testClaimOwnership() {
PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "1", "owner1");
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "etag2");
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/1")).thenReturn(blobAsyncClient);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
any(BlobRequestConditions.class)))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("1", partitionOwnership.getPartitionId());
assertEquals("eh", partitionOwnership.getEventHubName());
assertEquals("cg", partitionOwnership.getConsumerGroup());
assertEquals("etag2", partitionOwnership.getETag());
}).verifyComplete();
}
/**
* Tests claiming ownership on a previously owned partition.
*/
@Test
public void testClaimOwnershipExistingBlob() {
PartitionOwnership po = createPartitionOwnership("ns", "eh", "cg", "0", "owner1");
po.setETag("1");
HttpHeaders httpHeaders = new HttpHeaders();
httpHeaders.add(HttpHeaderName.ETAG, "2");
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/ownership/0")).thenReturn(blobAsyncClient);
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.just(new ResponseBase<>(null, 200, httpHeaders, null, null)));
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.assertNext(partitionOwnership -> {
assertEquals("owner1", partitionOwnership.getOwnerId());
assertEquals("0", partitionOwnership.getPartitionId());
assertEquals("eh", partitionOwnership.getEventHubName());
assertEquals("cg", partitionOwnership.getConsumerGroup());
assertEquals("2", partitionOwnership.getETag());
}).verifyComplete();
}
/**
* Tests that a failed ownership claim returns normally instead of throwing exception downstream.
*/
@Test
public void testClaimOwnershipFailed() {
final String namespace = "foo.servicebus.windows.net";
final String eventHubName = "test-event-hub";
final String consumerGroup = "test-cg";
final String partitionId = "0";
final String ownerId = "owner-id-1";
final PartitionOwnership po =
createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId, ownerId);
final String ownershipPath = getLegacyPrefix(namespace, eventHubName, consumerGroup)
+ OWNERSHIP_PATH + partitionId;
when(blobContainerAsyncClient.getBlobAsyncClient(ownershipPath)).thenReturn(blobAsyncClient);
when(blobAsyncClient.getBlockBlobAsyncClient()).thenReturn(blockBlobAsyncClient);
when(blockBlobAsyncClient.uploadWithResponse(ArgumentMatchers.<Flux<ByteBuffer>>any(), eq(0L),
isNull(), ArgumentMatchers.<Map<String, String>>any(), isNull(), isNull(),
any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
final BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
final PartitionOwnership po2 = createPartitionOwnership(namespace, eventHubName, consumerGroup, partitionId,
ownerId)
.setETag("1");
when(blobAsyncClient
.setMetadataWithResponse(ArgumentMatchers.<Map<String, String>>any(), any(BlobRequestConditions.class)))
.thenReturn(Mono.error(new ResourceModifiedException("Etag did not match", null)));
StepVerifier.create(blobCheckpointStore.claimOwnership(Collections.singletonList(po2)))
.verifyComplete();
final BlobContainerAsyncClient anotherContainerClient = mock(BlobContainerAsyncClient.class);
final BlobCheckpointStore anotherCheckpointStore = new BlobCheckpointStore(anotherContainerClient);
when(anotherContainerClient.getBlobAsyncClient(anyString())).thenReturn(null);
StepVerifier.create(anotherCheckpointStore.claimOwnership(Collections.singletonList(po)))
.verifyComplete();
}
/**
* Tests that an error is returned when {@link CheckpointStore
*/
@Test
public void testUpdateCheckpointError() {
Checkpoint checkpoint = new Checkpoint()
.setFullyQualifiedNamespace("ns")
.setEventHubName("eh")
.setConsumerGroup("cg")
.setPartitionId("0")
.setSequenceNumber(2L)
.setOffset(100L);
when(blobContainerAsyncClient.getBlobAsyncClient("ns/eh/cg/checkpoint/0")).thenReturn(blobAsyncClient);
when(blobAsyncClient.exists()).thenReturn(Mono.just(true));
when(blobAsyncClient.setMetadata(ArgumentMatchers.<Map<String, String>>any()))
.thenReturn(Mono.error(new SocketTimeoutException()));
BlobCheckpointStore blobCheckpointStore = new BlobCheckpointStore(blobContainerAsyncClient);
StepVerifier.create(blobCheckpointStore.updateCheckpoint(checkpoint))
.expectError(SocketTimeoutException.class).verify();
}
private static PartitionOwnership createPartitionOwnership(String fullyQualifiedNamespace, String eventHubName,
String consumerGroupName, String partitionId, String ownerId) {
return new PartitionOwnership()
.setFullyQualifiedNamespace(fullyQualifiedNamespace)
.setEventHubName(eventHubName)
.setConsumerGroup(consumerGroupName)
.setPartitionId(partitionId)
.setOwnerId(ownerId);
}
private static BlobItem getOwnershipBlobItem(String owner, String etag, String blobName) {
Map<String, String> metadata = new HashMap<>();
metadata.put(OWNER_ID, owner);
BlobItemProperties properties = new BlobItemProperties()
.setLastModified(OffsetDateTime.now())
.setETag(etag);
return new BlobItem()
.setName(blobName)
.setMetadata(metadata)
.setProperties(properties);
}
private static BlobItem getCheckpointBlobItem(String offset, String sequenceNumber, String blobName) {
Map<String, String> metadata = new HashMap<>();
metadata.put(SEQUENCE_NUMBER, sequenceNumber);
metadata.put(OFFSET, offset);
return new BlobItem()
.setName(blobName)
.setMetadata(metadata);
}
private static String getLegacyPrefix(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) {
return String.join("/", fullyQualifiedNamespace, eventHubName, consumerGroup);
}
} |
Anything particular about this PER_GB2018 SKU? | public void testCreateWorkspace() {
Workspace workspace = null;
try {
String spaceName = "space" + randomPadding();
workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new WorkspaceSku().withName(WorkspaceSkuNameEnum.PER_GB2018))
.withFeatures(new WorkspaceFeatures().withEnableLogAccessUsingOnlyResourcePermissions(true))
.withWorkspaceCapping(new WorkspaceCapping().withDailyQuotaGb(-1D))
.withRetentionInDays(30)
.withPublicNetworkAccessForIngestion(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.withPublicNetworkAccessForQuery(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.create();
workspace.refresh();
Assertions.assertEquals(workspace.name(), spaceName);
Assertions.assertEquals(workspace.name(), logAnalyticsManager.workspaces().getById(workspace.id()).name());
Assertions.assertTrue(logAnalyticsManager.workspaces().list().stream().findAny().isPresent());
} finally {
if (workspace != null) {
logAnalyticsManager.workspaces().deleteById(workspace.id());
}
}
} | .withSku(new WorkspaceSku().withName(WorkspaceSkuNameEnum.PER_GB2018)) | public void testCreateWorkspace() {
Workspace workspace = null;
try {
String spaceName = "space" + randomPadding();
workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.create();
workspace.refresh();
Assertions.assertEquals(workspace.name(), spaceName);
Assertions.assertEquals(workspace.name(), logAnalyticsManager.workspaces().getById(workspace.id()).name());
Assertions.assertTrue(logAnalyticsManager.workspaces().list().stream().findAny().isPresent());
} finally {
if (workspace != null) {
logAnalyticsManager.workspaces().deleteById(workspace.id());
}
}
} | class LogAnalyticsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class LogAnalyticsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Could we disable it? We prefer not to have public access for test resource, unless necessary. | public void testCreateWorkspace() {
Workspace workspace = null;
try {
String spaceName = "space" + randomPadding();
workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new WorkspaceSku().withName(WorkspaceSkuNameEnum.PER_GB2018))
.withFeatures(new WorkspaceFeatures().withEnableLogAccessUsingOnlyResourcePermissions(true))
.withWorkspaceCapping(new WorkspaceCapping().withDailyQuotaGb(-1D))
.withRetentionInDays(30)
.withPublicNetworkAccessForIngestion(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.withPublicNetworkAccessForQuery(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.create();
workspace.refresh();
Assertions.assertEquals(workspace.name(), spaceName);
Assertions.assertEquals(workspace.name(), logAnalyticsManager.workspaces().getById(workspace.id()).name());
Assertions.assertTrue(logAnalyticsManager.workspaces().list().stream().findAny().isPresent());
} finally {
if (workspace != null) {
logAnalyticsManager.workspaces().deleteById(workspace.id());
}
}
} | .withPublicNetworkAccessForQuery(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED) | public void testCreateWorkspace() {
Workspace workspace = null;
try {
String spaceName = "space" + randomPadding();
workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.create();
workspace.refresh();
Assertions.assertEquals(workspace.name(), spaceName);
Assertions.assertEquals(workspace.name(), logAnalyticsManager.workspaces().getById(workspace.id()).name());
Assertions.assertTrue(logAnalyticsManager.workspaces().list().stream().findAny().isPresent());
} finally {
if (workspace != null) {
logAnalyticsManager.workspaces().deleteById(workspace.id());
}
}
} | class LogAnalyticsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class LogAnalyticsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Fixed to be the most streamlined parameters in the new version. | public void testCreateWorkspace() {
Workspace workspace = null;
try {
String spaceName = "space" + randomPadding();
workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withSku(new WorkspaceSku().withName(WorkspaceSkuNameEnum.PER_GB2018))
.withFeatures(new WorkspaceFeatures().withEnableLogAccessUsingOnlyResourcePermissions(true))
.withWorkspaceCapping(new WorkspaceCapping().withDailyQuotaGb(-1D))
.withRetentionInDays(30)
.withPublicNetworkAccessForIngestion(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.withPublicNetworkAccessForQuery(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED)
.create();
workspace.refresh();
Assertions.assertEquals(workspace.name(), spaceName);
Assertions.assertEquals(workspace.name(), logAnalyticsManager.workspaces().getById(workspace.id()).name());
Assertions.assertTrue(logAnalyticsManager.workspaces().list().stream().findAny().isPresent());
} finally {
if (workspace != null) {
logAnalyticsManager.workspaces().deleteById(workspace.id());
}
}
} | .withPublicNetworkAccessForQuery(com.azure.resourcemanager.loganalytics.models.PublicNetworkAccessType.ENABLED) | public void testCreateWorkspace() {
Workspace workspace = null;
try {
String spaceName = "space" + randomPadding();
workspace = logAnalyticsManager.workspaces()
.define(spaceName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.create();
workspace.refresh();
Assertions.assertEquals(workspace.name(), spaceName);
Assertions.assertEquals(workspace.name(), logAnalyticsManager.workspaces().getById(workspace.id()).name());
Assertions.assertTrue(logAnalyticsManager.workspaces().list().stream().findAny().isPresent());
} finally {
if (workspace != null) {
logAnalyticsManager.workspaces().deleteById(workspace.id());
}
}
} | class LogAnalyticsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class LogAnalyticsManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private LogAnalyticsManager logAnalyticsManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
logAnalyticsManager = LogAnalyticsManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Who will inject these two accessor? Our code or the customer's code? Because we don't seem to have default values for these two in our configuration. | private TargetingFilterContext buildContext(Object appContext) {
if (contextualAccessor != null) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
contextualAccessor.configureTargetingContext(targetingContext, appContext);
return targetingContext;
}
if (contextAccessor != null) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
contextAccessor.configureTargetingContext(targetingContext);
return targetingContext;
}
throw new FeatureManagementException("No Targeting Filter Context found to assign variant.");
} | throw new FeatureManagementException("No Targeting Filter Context found to assign variant."); | private TargetingFilterContext buildContext(Object appContext) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
if (contextualAccessor != null && (appContext != null || contextAccessor == null)) {
contextualAccessor.configureTargetingContext(targetingContext, appContext);
return targetingContext;
}
if (contextAccessor != null) {
contextAccessor.configureTargetingContext(targetingContext);
return targetingContext;
}
throw new FeatureManagementException("No Targeting Filter Context found to assign variant.");
} | class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
} | class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
} |
And are these two exclusive to each other? | private TargetingFilterContext buildContext(Object appContext) {
if (contextualAccessor != null) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
contextualAccessor.configureTargetingContext(targetingContext, appContext);
return targetingContext;
}
if (contextAccessor != null) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
contextAccessor.configureTargetingContext(targetingContext);
return targetingContext;
}
throw new FeatureManagementException("No Targeting Filter Context found to assign variant.");
} | throw new FeatureManagementException("No Targeting Filter Context found to assign variant."); | private TargetingFilterContext buildContext(Object appContext) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
if (contextualAccessor != null && (appContext != null || contextAccessor == null)) {
contextualAccessor.configureTargetingContext(targetingContext, appContext);
return targetingContext;
}
if (contextAccessor != null) {
contextAccessor.configureTargetingContext(targetingContext);
return targetingContext;
}
throw new FeatureManagementException("No Targeting Filter Context found to assign variant.");
} | class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
} | class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
} |
These are provided by the customer. These objects are the ones that are used to provide the current user's user_id and group info, for various forms of targeting. Only one of them is ever used at a time. In theory only the new one is needed, but modifying or removing the old one would be a breaking change. Also, just updated the logic for how this works. | private TargetingFilterContext buildContext(Object appContext) {
if (contextualAccessor != null) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
contextualAccessor.configureTargetingContext(targetingContext, appContext);
return targetingContext;
}
if (contextAccessor != null) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
contextAccessor.configureTargetingContext(targetingContext);
return targetingContext;
}
throw new FeatureManagementException("No Targeting Filter Context found to assign variant.");
} | throw new FeatureManagementException("No Targeting Filter Context found to assign variant."); | private TargetingFilterContext buildContext(Object appContext) {
TargetingFilterContext targetingContext = new TargetingFilterContext();
if (contextualAccessor != null && (appContext != null || contextAccessor == null)) {
contextualAccessor.configureTargetingContext(targetingContext, appContext);
return targetingContext;
}
if (contextAccessor != null) {
contextAccessor.configureTargetingContext(targetingContext);
return targetingContext;
}
throw new FeatureManagementException("No Targeting Filter Context found to assign variant.");
} | class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
} | class exist and set as an @Component?",
filter.getName());
if (properties.isFailFast()) {
String message = "Fail fast is set and a Filter was unable to be found";
ReflectionUtils.rethrowRuntimeException(new FilterNotFoundException(message, e, filter));
} |
In .NET, I believe we unconditionally use the tenantID. I'm just curious why this logic is needed. For example: https://github.com/Azure/azure-sdk-for-net/blob/8d7f7deee7baf440bba3a85b586ebcec406e1b47/sdk/identity/Azure.Identity/src/MsalPublicClient.cs#L41C22-L41C22 | public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
azCommand.append(scopes);
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azCommand.append(" --tenant ").append(tenant);
}
try {
return getTokenFromAzureCLIAuthentication(azCommand);
} catch (RuntimeException e) {
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
} | if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) { | public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
azCommand.append(scopes);
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azCommand.append(" --tenant ").append(tenant);
}
try {
return getTokenFromAzureCLIAuthentication(azCommand);
} catch (RuntimeException e) {
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
} | class IdentitySyncClient extends IdentityClientBase {
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param clientSecret the client secret of the application.
* @param resourceId the resource ID of the application
* @param certificatePath the path to the PKCS12 or PEM certificate of the application.
* @param certificate the PKCS12 or PEM certificate of the application.
* @param certificatePassword the password protecting the PFX certificate.
* @param isSharedTokenCacheCredential Indicate whether the credential is
* {@link com.azure.identity.SharedTokenCacheCredential} or not.
* @param clientAssertionTimeout the timeout to use for the client assertion.
* @param options the options configuring the client.
*/
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
private String parseClientAssertion() {
if (clientAssertionFilePath != null) {
try {
byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
return new String(encoded, StandardCharsets.UTF_8);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} else {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"Client Assertion File Path is not provided."
+ " It should be provided to authenticate with client assertion."
));
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
if (clientAssertionSupplier != null) {
builder.clientCredential(ClientCredentialFactory
.createFromClientAssertion(clientAssertionSupplier.get()));
}
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException | ExecutionException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? confidentialClientApplicationAccessorWithCae : confidentialClientApplicationAccessor;
}
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? publicClientApplicationAccessorWithCae : publicClientApplicationAccessor;
}
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
try {
IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
AccessToken accessToken = new MsalToken(authenticationResult);
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
} else {
throw new IllegalStateException("Received token is close to expiry.");
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @param account the account used to log in to acquire the last token
* @return a Publisher that emits an AccessToken
*/
@SuppressWarnings("deprecation")
public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
Consumer<DeviceCodeInfo> deviceCodeConsumer) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder = buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
try {
return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
}
}
/**
* Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @param redirectUrl the redirect URL to listen on and receive security code
* @param loginHint the username suggestion to pre-fill the login page's username/email address field
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
String redirectUrl, String loginHint) {
URI redirectUri;
String redirect;
if (port != null) {
redirect = HTTP_LOCALHOST + ":" + port;
} else if (redirectUrl != null) {
redirect = redirectUrl;
} else {
redirect = HTTP_LOCALHOST;
}
try {
redirectUri = new URI(redirect);
} catch (URISyntaxException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
buildInteractiveRequestParameters(request, loginHint, redirectUri);
PublicClientApplication pc = getPublicClientInstance(request).getValue();
try {
return new MsalToken(pc.acquireToken(builder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
"Failed to acquire token with Interactive Browser Authentication.", null, e));
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
/**
* Asynchronously acquire a token from Active Directory with Azure Developer CLI.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
List<String> scopes = request.getScopes();
if (scopes.size() == 0) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
}
scopes.forEach(scope -> {
try {
ScopeUtil.validateScope(scope);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
});
azdCommand.append(String.join(" --scope ", scopes));
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azdCommand.append(" --tenant-id ").append(tenant);
}
try {
return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
} catch (RuntimeException e) {
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
/**
* Asynchronously acquire a token from Active Directory with Azure PowerShell.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithOBO(TokenRequestContext request) {
ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
try {
return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
}
}
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
try {
String assertionToken = clientAssertionAccessor.getValue();
return authenticateWithExchangeTokenHelper(request, assertionToken);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
return appTokenProviderParameters -> {
TokenRequestContext trc = new TokenRequestContext()
.setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
.setClaims(appTokenProviderParameters.claims)
.setTenantId(appTokenProviderParameters.tenantId);
AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
TokenProviderResult result = new TokenProviderResult();
result.setAccessToken(accessToken.getToken());
result.setTenantId(trc.getTenantId());
result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
return result;
};
return options.getExecutorService() != null
? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
: CompletableFuture.supplyAsync(tokenProviderResultSupplier);
};
}
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient =
workloadIdentityConfidentialClientApplicationAccessor.getValue();
try {
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
/**
* Get the configured identity client options.
*
* @return the client options.
*/
public IdentityClientOptions getIdentityClientOptions() {
return options;
}
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
return null;
}
} | class IdentitySyncClient extends IdentityClientBase {
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param clientSecret the client secret of the application.
* @param resourceId the resource ID of the application
* @param certificatePath the path to the PKCS12 or PEM certificate of the application.
* @param certificate the PKCS12 or PEM certificate of the application.
* @param certificatePassword the password protecting the PFX certificate.
* @param isSharedTokenCacheCredential Indicate whether the credential is
* {@link com.azure.identity.SharedTokenCacheCredential} or not.
* @param clientAssertionTimeout the timeout to use for the client assertion.
* @param options the options configuring the client.
*/
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
private String parseClientAssertion() {
if (clientAssertionFilePath != null) {
try {
byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
return new String(encoded, StandardCharsets.UTF_8);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} else {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"Client Assertion File Path is not provided."
+ " It should be provided to authenticate with client assertion."
));
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
if (clientAssertionSupplier != null) {
builder.clientCredential(ClientCredentialFactory
.createFromClientAssertion(clientAssertionSupplier.get()));
}
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (InterruptedException | ExecutionException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? confidentialClientApplicationAccessorWithCae : confidentialClientApplicationAccessor;
}
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
return request.isCaeEnabled()
? publicClientApplicationAccessorWithCae : publicClientApplicationAccessor;
}
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
ClientCredentialParameters.ClientCredentialParametersBuilder builder =
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
} catch (Exception e) {
throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
}
}
/**
 * Attempts a silent (cache-based) token acquisition with the confidential client.
 *
 * @param request the details of the token request
 * @return a cached AccessToken that is not close to expiry
 * @throws IllegalStateException if the cached token is within the refresh window
 */
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
        .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.isCaeEnabled() && request.getClaims() != null) {
        // A CAE claims challenge invalidates cached tokens: pass the claims through and force MSAL to skip the cache.
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    try {
        IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
        AccessToken accessToken = new MsalToken(authenticationResult);
        // Only hand back tokens that are comfortably outside the refresh window.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        } else {
            throw new IllegalStateException("Received token is close to expiry.");
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    }
}
/**
 * Synchronously acquire a token from the currently logged in client.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return the MsalToken from the cache, or a force-refreshed token if the cached one is near expiry
 */
@SuppressWarnings("deprecation")
public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    // First pass: plain silent acquisition against the MSAL token cache.
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
        new HashSet<>(request.getScopes()));
    if (request.getClaims() != null) {
        // A claims challenge invalidates cached tokens; pass the claims and force a refresh.
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    if (account != null) {
        parametersBuilder = parametersBuilder.account(account);
    }
    parametersBuilder.tenant(
        IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
        // Serve the cached token only while it is outside the refresh window.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    }
    // Second pass: the cached token was near expiry — force MSAL to refresh it.
    SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
        new HashSet<>(request.getScopes())).forceRefresh(true);
    if (request.isCaeEnabled() && request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest
            .formatAsClaimsRequest(request.getClaims());
        forceParametersBuilder.claims(customClaimRequest);
    }
    if (account != null) {
        forceParametersBuilder = forceParametersBuilder.account(account);
    }
    forceParametersBuilder.tenant(
        IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory with a username and a password.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return the MsalToken
 */
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login, and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user-provided closure that will consume the device code challenge
 * @return the MsalToken once the device challenge is met
 * @throws ClientAuthenticationException if the device code expires or token acquisition fails
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    // Resolve the public client, then block on MSAL until the user completes the challenge.
    PublicClientApplication publicClient = getPublicClientInstance(request).getValue();
    DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder flowBuilder =
        buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
    try {
        IAuthenticationResult result = publicClient.acquireToken(flowBuilder.build()).get();
        return new MsalToken(result);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the MsalToken acquired after the interactive login completes
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
    String redirectUrl, String loginHint) {
    // Pick the redirect target: explicit port wins, then an explicit URL, then plain localhost.
    final String redirect;
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    final URI redirectUri;
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder interactiveBuilder =
        buildInteractiveRequestParameters(request, loginHint, redirectUri);
    PublicClientApplication publicClient = getPublicClientInstance(request).getValue();
    try {
        return new MsalToken(publicClient.acquireToken(interactiveBuilder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory with Azure CLI.
 * NOTE(review): no Azure CLI method immediately follows this comment here — confirm this Javadoc
 * is attached to the intended method.
 *
 * @param request the details of the token request
 * @return the AccessToken
 */
/**
 * Synchronously acquire a token from Active Directory with Azure Developer CLI.
 *
 * @param request the details of the token request
 * @return the AccessToken
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // azd requires at least one scope; fail fast with a clear message.
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate every scope before it is placed on a command line.
    for (String scope : scopes) {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    }
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    // The default tenant placeholder must never be forwarded to azd.
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquire a token on behalf of another user with the On-Behalf-Of flow.
 * NOTE(review): this Javadoc previously described "Azure PowerShell", but the method that follows
 * implements the OBO flow — confirm the intended pairing.
 *
 * @param request the details of the token request
 * @return the AccessToken
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
    try {
        IAuthenticationResult result = confidentialClient.acquireToken(buildOBOFlowParameters(request)).get();
        return new MsalToken(result);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
    }
}
/**
 * Exchanges the configured client assertion for an access token, synchronously.
 *
 * @param request the details of the token request
 * @return the AccessToken
 * @throws UncheckedIOException if reading or exchanging the assertion fails with an I/O error
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
    try {
        return authenticateWithExchangeTokenHelper(request, clientAssertionAccessor.getValue());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Supplies MSAL's managed-identity path with an app token provider backed by the
 * workload-identity token exchange.
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return appTokenProviderParameters -> {
        // Translate MSAL's callback parameters into our TokenRequestContext.
        TokenRequestContext trc = new TokenRequestContext()
            .setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
            .setClaims(appTokenProviderParameters.claims)
            .setTenantId(appTokenProviderParameters.tenantId);
        // NOTE: the token exchange runs eagerly on the calling thread; only the result
        // packaging below is performed asynchronously.
        AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
        Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
            TokenProviderResult result = new TokenProviderResult();
            result.setAccessToken(accessToken.getToken());
            result.setTenantId(trc.getTenantId());
            result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
            return result;
        };
        // Honor a user-provided executor when one is configured.
        return options.getExecutorService() != null
            ? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
            : CompletableFuture.supplyAsync(tokenProviderResultSupplier);
    };
}
/**
 * Synchronously acquire a token using the workload identity confidential client.
 *
 * @param request the details of the token request
 * @return the AccessToken
 * @throws CredentialUnavailableException if token acquisition fails
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = workloadIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        HashSet<String> scopes = new HashSet<>(request.getScopes());
        ClientCredentialParameters parameters = ClientCredentialParameters.builder(scopes)
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
            .build();
        return new MsalToken(confidentialClient.acquireToken(parameters).get());
    } catch (Exception e) {
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    // NOTE(review): intentionally returns null — presumably the sync client never routes
    // through this reactive managed-identity path; confirm callers tolerate a null Mono.
    return null;
}
} |
`organizations` is not a valid tenant for getting a token from a service principal. A service principal should always be bound to a given tenant. The customer was not passing this through - we made a change a while go that introduced this bug and caused the issue. After researching and talking to @jiasli it was determined that we should never pass this to azcli so we put the check in. | public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
azCommand.append(scopes);
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azCommand.append(" --tenant ").append(tenant);
}
try {
return getTokenFromAzureCLIAuthentication(azCommand);
} catch (RuntimeException e) {
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
} | if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) { | public AccessToken authenticateWithAzureCli(TokenRequestContext request) {
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String scopes = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
azCommand.append(scopes);
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azCommand.append(" --tenant ").append(tenant);
}
try {
return getTokenFromAzureCLIAuthentication(azCommand);
} catch (RuntimeException e) {
throw (e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
class IdentitySyncClient extends IdentityClientBase {
// Lazily-initialized MSAL application holders; the *WithCae variants serve requests
// that enable Continuous Access Evaluation (CAE).
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Client assertion text, re-read from disk on a refresh cadence.
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentitySyncClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to a file containing a client assertion token.
 * @param resourceId the resource ID of the application
 * @param clientAssertionSupplier the supplier of a client assertion token.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 * {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for the client assertion.
 * @param options the options configuring the client.
 */
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getPublicClient(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getConfidentialClient(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
this.getConfidentialClient(true));
this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getManagedIdentityConfidentialClient());
this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
this.getWorkloadIdentityConfidentialClient());
this.clientAssertionAccessor = clientAssertionTimeout == null
? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
: new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
// Reads the client assertion token from the configured file path, decoded as UTF-8.
private String parseClientAssertion() {
    if (clientAssertionFilePath == null) {
        // No file configured: assertion-based authentication cannot proceed.
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "Client Assertion File Path is not provided."
                + " It should be provided to authenticate with client assertion."
        ));
    }
    try {
        byte[] raw = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(raw, StandardCharsets.UTF_8);
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Synchronously acquire a token from Active Directory with a client secret.
 *
 * @param request the details of the token request
 * @return the AccessToken
 */
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    // A caller-supplied client assertion takes precedence over the configured credential.
    if (clientAssertionSupplier != null) {
        builder.clientCredential(ClientCredentialFactory
            .createFromClientAssertion(clientAssertionSupplier.get()));
    }
    try {
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status before wrapping so callers can still observe it.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/** Picks the confidential client accessor matching the request's CAE setting. */
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
/** Picks the public client accessor matching the request's CAE setting. */
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Synchronously acquire a token using the managed identity confidential client.
 *
 * @param request the details of the token request
 * @return the AccessToken
 * @throws CredentialUnavailableException if managed identity authentication fails
 */
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (Exception e) {
        // Fix: the broad catch previously swallowed the interrupt status.
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Attempts a silent (cache-based) token acquisition with the confidential client.
 *
 * @param request the details of the token request
 * @return a cached AccessToken that is not close to expiry
 * @throws IllegalStateException if the cached token is within the refresh window
 */
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
        .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.isCaeEnabled() && request.getClaims() != null) {
        // A CAE claims challenge invalidates cached tokens: pass the claims through and force MSAL to skip the cache.
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    try {
        IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
        AccessToken accessToken = new MsalToken(authenticationResult);
        // Only hand back tokens that are comfortably outside the refresh window.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        } else {
            throw new IllegalStateException("Received token is close to expiry.");
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    }
}
/**
 * Synchronously acquire a token from the currently logged in client.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return the MsalToken from the cache, or a force-refreshed token if the cached one is near expiry
 */
@SuppressWarnings("deprecation")
public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes()));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
parametersBuilder.forceRefresh(true);
}
if (account != null) {
parametersBuilder = parametersBuilder.account(account);
}
parametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
return accessToken;
}
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
new HashSet<>(request.getScopes())).forceRefresh(true);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest
.formatAsClaimsRequest(request.getClaims());
forceParametersBuilder.claims(customClaimRequest);
}
if (account != null) {
forceParametersBuilder = forceParametersBuilder.account(account);
}
forceParametersBuilder.tenant(
IdentityUtil.resolveTenantId(tenantId, request, options));
try {
return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
} catch (ExecutionException | InterruptedException e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
}
}
/**
 * Synchronously acquire a token from Active Directory with a username and a password.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return the MsalToken
 */
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login, and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user-provided closure that will consume the device code challenge
 * @return the MsalToken once the device challenge is met
 * @throws ClientAuthenticationException if the device code expires or token acquisition fails
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    // Resolve the public client, then block on MSAL until the user completes the challenge.
    PublicClientApplication publicClient = getPublicClientInstance(request).getValue();
    DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder flowBuilder =
        buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
    try {
        IAuthenticationResult result = publicClient.acquireToken(flowBuilder.build()).get();
        return new MsalToken(result);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the MsalToken acquired after the interactive login completes
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
    String redirectUrl, String loginHint) {
    // Pick the redirect target: explicit port wins, then an explicit URL, then plain localhost.
    final String redirect;
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    final URI redirectUri;
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder interactiveBuilder =
        buildInteractiveRequestParameters(request, loginHint, redirectUri);
    PublicClientApplication publicClient = getPublicClientInstance(request).getValue();
    try {
        return new MsalToken(publicClient.acquireToken(interactiveBuilder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory with Azure CLI.
 * NOTE(review): no Azure CLI method immediately follows this comment here — confirm this Javadoc
 * is attached to the intended method.
 *
 * @param request the details of the token request
 * @return the AccessToken
 */
/**
 * Synchronously acquire a token from Active Directory with Azure Developer CLI.
 *
 * @param request the details of the token request
 * @return the AccessToken
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // azd requires at least one scope; fail fast with a clear message.
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate every scope before it is placed on a command line.
    for (String scope : scopes) {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    }
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    // The default tenant placeholder must never be forwarded to azd.
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquire a token on behalf of another user with the On-Behalf-Of flow.
 * NOTE(review): this Javadoc previously described "Azure PowerShell", but the method that follows
 * implements the OBO flow — confirm the intended pairing.
 *
 * @param request the details of the token request
 * @return the AccessToken
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
    try {
        IAuthenticationResult result = confidentialClient.acquireToken(buildOBOFlowParameters(request)).get();
        return new MsalToken(result);
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(
            new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
    }
}
/**
 * Exchanges the configured client assertion for an access token, synchronously.
 *
 * @param request the details of the token request
 * @return the AccessToken
 * @throws UncheckedIOException if reading or exchanging the assertion fails with an I/O error
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
    try {
        return authenticateWithExchangeTokenHelper(request, clientAssertionAccessor.getValue());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Supplies MSAL's managed-identity path with an app token provider backed by the
 * workload-identity token exchange.
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return appTokenProviderParameters -> {
        // Translate MSAL's callback parameters into our TokenRequestContext.
        TokenRequestContext trc = new TokenRequestContext()
            .setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
            .setClaims(appTokenProviderParameters.claims)
            .setTenantId(appTokenProviderParameters.tenantId);
        // NOTE: the token exchange runs eagerly on the calling thread; only the result
        // packaging below is performed asynchronously.
        AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
        Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
            TokenProviderResult result = new TokenProviderResult();
            result.setAccessToken(accessToken.getToken());
            result.setTenantId(trc.getTenantId());
            result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
            return result;
        };
        // Honor a user-provided executor when one is configured.
        return options.getExecutorService() != null
            ? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
            : CompletableFuture.supplyAsync(tokenProviderResultSupplier);
    };
}
/**
 * Synchronously acquire a token using the workload identity confidential client.
 *
 * @param request the details of the token request
 * @return the AccessToken
 * @throws CredentialUnavailableException if token acquisition fails
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = workloadIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        HashSet<String> scopes = new HashSet<>(request.getScopes());
        ClientCredentialParameters parameters = ClientCredentialParameters.builder(scopes)
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
            .build();
        return new MsalToken(confidentialClient.acquireToken(parameters).get());
    } catch (Exception e) {
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    // NOTE(review): intentionally returns null — presumably the sync client never routes
    // through this reactive managed-identity path; confirm callers tolerate a null Mono.
    return null;
}
}
class IdentitySyncClient extends IdentityClientBase {
// Lazily-initialized MSAL application instances; the *WithCae variants enable
// Continuous Access Evaluation and are selected per-request via isCaeEnabled().
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronousAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronousAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronousAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Caches the client assertion read from disk; refreshed on the configured timeout.
private final SynchronousAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to the file holding the client assertion token.
 * @param resourceId the resource ID of the application
 * @param clientAssertionSupplier supplier producing a client assertion on demand.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 *     {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for the client assertion.
 * @param options the options configuring the client.
 */
IdentitySyncClient(String tenantId, String clientId, String clientSecret, String certificatePath,
    String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
    InputStream certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
    Duration clientAssertionTimeout, IdentityClientOptions options) {
    super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId, clientAssertionSupplier,
        certificate, certificatePassword, isSharedTokenCacheCredential, clientAssertionTimeout, options);
    this.publicClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getPublicClient(isSharedTokenCacheCredential, false));
    this.publicClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
        this.getPublicClient(isSharedTokenCacheCredential, true));
    this.confidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getConfidentialClient(false));
    this.confidentialClientApplicationAccessorWithCae = new SynchronousAccessor<>(() ->
        this.getConfidentialClient(true));
    this.managedIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getManagedIdentityConfidentialClient());
    this.workloadIdentityConfidentialClientApplicationAccessor = new SynchronousAccessor<>(() ->
        this.getWorkloadIdentityConfidentialClient());
    // Default assertion refresh interval is 5 minutes when no timeout is configured.
    this.clientAssertionAccessor = clientAssertionTimeout == null
        ? new SynchronousAccessor<>(() -> parseClientAssertion(), Duration.ofMinutes(5))
        : new SynchronousAccessor<>(() -> parseClientAssertion(), clientAssertionTimeout);
}
/**
 * Reads the client assertion token from the configured file path.
 *
 * @return the assertion file contents decoded as UTF-8
 */
private String parseClientAssertion() {
    // Fail fast when no assertion file was configured; this credential cannot
    // authenticate without it.
    if (clientAssertionFilePath == null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "Client Assertion File Path is not provided."
                + " It should be provided to authenticate with client assertion."
        ));
    }
    try {
        byte[] fileBytes = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(fileBytes, StandardCharsets.UTF_8);
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Synchronously acquire a token from Active Directory with a client secret or assertion.
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = getConfidentialClientInstance(request).getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil
                .resolveTenantId(tenantId, request, options));
    // When a client assertion supplier is configured it takes precedence as the credential.
    if (clientAssertionSupplier != null) {
        builder.clientCredential(ClientCredentialFactory
            .createFromClientAssertion(clientAssertionSupplier.get()));
    }
    try {
        // Blocks on the MSAL future; this is the synchronous client.
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers/executors can observe the interruption.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
// Selects the CAE-enabled confidential client when the request opts into
// Continuous Access Evaluation, otherwise the default instance.
private SynchronousAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
// Same CAE-based selection for the public (user-interactive) client.
private SynchronousAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Synchronously acquire a token via the managed identity confidential client.
 * Failures are surfaced as CredentialUnavailableException so a credential chain
 * can fall through to its next member.
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient = managedIdentityConfidentialClientApplicationAccessor.getValue();
    ClientCredentialParameters.ClientCredentialParametersBuilder builder =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil
                .resolveTenantId(tenantId, request, options));
    try {
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (InterruptedException e) {
        // Restore the interrupt status before reporting the credential unavailable.
        Thread.currentThread().interrupt();
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    } catch (Exception e) {
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Attempts a silent (cache-based) token acquisition with the confidential client.
 * A cached token is only returned when it is not close to expiry; otherwise an
 * IllegalStateException is raised so the caller falls back to a live request.
 *
 * @param request the details of the token request
 * @return a cached AccessToken that is comfortably within its lifetime
 */
public AccessToken authenticateWithConfidentialClientCache(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClientApplication = getConfidentialClientInstance(request).getValue();
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(new HashSet<>(request.getScopes()))
        .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    // CAE claims challenges invalidate cached tokens; force a refresh with the claims attached.
    if (request.isCaeEnabled() && request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    try {
        IAuthenticationResult authenticationResult = confidentialClientApplication.acquireTokenSilently(parametersBuilder.build()).get();
        AccessToken accessToken = new MsalToken(authenticationResult);
        // Reject tokens within REFRESH_OFFSET of expiry so callers proactively refresh.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        } else {
            throw new IllegalStateException("Received token is close to expiry.");
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (InterruptedException e) {
        // Restore the interrupt status so the interruption is not silently swallowed.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    } catch (ExecutionException e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    }
}
/**
 * Synchronously acquire a token from the currently logged in client.
 *
 * Two-phase flow: first try the cache as-is and accept the token only if it is
 * comfortably within its lifetime; otherwise retry with forceRefresh(true).
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return the acquired MsalToken
 */
@SuppressWarnings("deprecation")
public MsalToken authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    // Phase 1: plain silent acquisition from the MSAL cache.
    SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
        new HashSet<>(request.getScopes()));
    if (request.getClaims() != null) {
        // Claims challenges invalidate cached tokens, so force a refresh with them attached.
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
        parametersBuilder.forceRefresh(true);
    }
    if (account != null) {
        parametersBuilder = parametersBuilder.account(account);
    }
    parametersBuilder.tenant(
        IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        MsalToken accessToken = new MsalToken(pc.acquireTokenSilently(parametersBuilder.build()).get());
        // Accept only tokens that are not within REFRESH_OFFSET of expiry;
        // otherwise fall through to the force-refresh attempt below.
        if (OffsetDateTime.now().isBefore(accessToken.getExpiresAt().minus(REFRESH_OFFSET))) {
            return accessToken;
        }
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        // NOTE(review): interrupt status is not restored here — confirm intent.
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    }
    // Phase 2: force a refresh through MSAL (uses the cached refresh token).
    SilentParameters.SilentParametersBuilder forceParametersBuilder = SilentParameters.builder(
        new HashSet<>(request.getScopes())).forceRefresh(true);
    if (request.isCaeEnabled() && request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest
            .formatAsClaimsRequest(request.getClaims());
        forceParametersBuilder.claims(customClaimRequest);
    }
    if (account != null) {
        forceParametersBuilder = forceParametersBuilder.account(account);
    }
    forceParametersBuilder.tenant(
        IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        return new MsalToken(pc.acquireTokenSilently(forceParametersBuilder.build()).get());
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e.getMessage(), e));
    } catch (ExecutionException | InterruptedException e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(e.getMessage(), null, e));
    }
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public MsalToken authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
PublicClientApplication pc = getPublicClientInstance(request).getValue();
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder =
buildUsernamePasswordFlowParameters(request, username, password);
try {
return new MsalToken(pc.acquireToken(userNamePasswordParametersBuilder.build()).get());
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, e));
}
}
/**
 * Synchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return the acquired MsalToken when the device challenge is met; throws if the device code expires
 */
public MsalToken authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parametersBuilder =
        buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
    try {
        // Blocks until the user completes (or the service expires) the device-code challenge.
        return new MsalToken(pc.acquireToken(parametersBuilder.build()).get());
    } catch (InterruptedException e) {
        // Restore the interrupt status before surfacing the failure.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with device code.", null, e));
    }
}
/**
 * Synchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return the acquired MsalToken
 */
public MsalToken authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
    String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port > explicit redirect URL > bare localhost default.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
    InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
        buildInteractiveRequestParameters(request, loginHint, redirectUri);
    PublicClientApplication pc = getPublicClientInstance(request).getValue();
    try {
        // Blocks until the user completes the browser login flow.
        return new MsalToken(pc.acquireToken(builder.build()).get());
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, e));
    }
}
/**
 * Synchronously acquire a token by shelling out to the Azure Developer CLI
 * ({@code azd auth token}).
 *
 * @param request the details of the token request; must contain at least one scope
 * @return the acquired AccessToken
 * @throws IllegalArgumentException if no scope is present or a scope is malformed
 * @throws CredentialUnavailableException if the azd CLI cannot produce a token
 */
public AccessToken authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    if (scopes.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request"));
    }
    // Validate each scope before it is placed on a command line.
    for (String scope : scopes) {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    }
    azdCommand.append(String.join(" --scope ", scopes));
    String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
    // Guard against shell-unsafe characters in the tenant id.
    ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
    if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
        azdCommand.append(" --tenant-id ").append(tenant);
    }
    try {
        return getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
    } catch (RuntimeException e) {
        // CredentialUnavailable failures are logged at a lower severity so chained
        // credentials can fall through quietly.
        throw (e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Synchronously acquire a token on behalf of another user (OBO flow) using the
 * confidential client and the user assertion carried by the request.
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithOBO(TokenRequestContext request) {
    ConfidentialClientApplication cc = getConfidentialClientInstance(request).getValue();
    try {
        return new MsalToken(cc.acquireToken(buildOBOFlowParameters(request)).get());
    } catch (InterruptedException e) {
        // Restore the interrupt status before surfacing the failure.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
    } catch (Exception e) {
        throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to acquire token with On Behalf Of Authentication.", null, e));
    }
}
/**
 * Synchronously exchanges the cached client assertion for an access token
 * (workload identity federation flow).
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithExchangeTokenSync(TokenRequestContext request) {
    try {
        // The accessor caches the assertion read from disk and refreshes it on its timeout.
        return authenticateWithExchangeTokenHelper(request, clientAssertionAccessor.getValue());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Bridges MSAL's app token provider callback to this client's synchronous token
// exchange: the actual exchange happens eagerly on the calling thread, and only
// the result packaging is deferred to the CompletableFuture.
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return appTokenProviderParameters -> {
        // Translate MSAL's parameters into the SDK's request type.
        TokenRequestContext trc = new TokenRequestContext()
            .setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
            .setClaims(appTokenProviderParameters.claims)
            .setTenantId(appTokenProviderParameters.tenantId);
        AccessToken accessToken = authenticateWithExchangeTokenSync(trc);
        Supplier<TokenProviderResult> tokenProviderResultSupplier = () -> {
            TokenProviderResult result = new TokenProviderResult();
            result.setAccessToken(accessToken.getToken());
            result.setTenantId(trc.getTenantId());
            result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
            return result;
        };
        // Honor a user-supplied executor; otherwise fall back to the common pool.
        return options.getExecutorService() != null
            ? CompletableFuture.supplyAsync(tokenProviderResultSupplier, options.getExecutorService())
            : CompletableFuture.supplyAsync(tokenProviderResultSupplier);
    };
}
/**
 * Synchronously acquire a token using the workload identity confidential client.
 * Failures are reported as CredentialUnavailableException so that a credential
 * chain can fall through to the next credential.
 *
 * @param request the details of the token request
 * @return the acquired AccessToken
 */
public AccessToken authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    ConfidentialClientApplication confidentialClient =
        workloadIdentityConfidentialClientApplicationAccessor.getValue();
    try {
        ClientCredentialParameters.ClientCredentialParametersBuilder builder =
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil
                    .resolveTenantId(tenantId, request, options));
        return new MsalToken(confidentialClient.acquireToken(builder.build()).get());
    } catch (InterruptedException e) {
        // Restore the interrupt status before reporting the credential unavailable.
        Thread.currentThread().interrupt();
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    } catch (Exception e) {
        throw new CredentialUnavailableException("Managed Identity authentication is not available.", e);
    }
}
/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return options;
}
// This synchronous client does not delegate to a target managed identity;
// callers must treat the null publisher as "not supported here".
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    return null;
}
} |
Any doc or evidence to support this behavior(if null, then true)? | public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | return true; | public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
// Lazily-built wrappers over the inner model; reset by clearWrapperProperties().
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
// Exactly one of create/update parameters is active depending on the mode;
// update() nulls createParameters and rebuilds the helpers for update mode.
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
// Constructed in create mode: helpers are bound to the create parameters.
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
    super(name, innerModel, storageManager);
    this.createParameters = new StorageAccountCreateParameters();
    this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
    this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
// Lazily builds and memoizes the wrapper over the inner primary/secondary statuses.
@Override
public AccountStatuses accountStatuses() {
    AccountStatuses cached = this.accountStatuses;
    if (cached != null) {
        return cached;
    }
    this.accountStatuses =
        new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
    return this.accountStatuses;
}
// ---- Read-only accessors: thin projections over the inner (service) model. ----
@Override
public StorageAccountSkuType skuType() {
    return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
    return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
    return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
    return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
    return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
    return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
    // Lazily built and cached; invalidated via clearWrapperProperties() on refresh.
    if (publicEndpoints == null) {
        publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
    }
    return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
    return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
    return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
    return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
    return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
    // Null when no system-assigned identity is configured on the account.
    if (this.innerModel().identity() == null) {
        return null;
    } else {
        return this.innerModel().identity().tenantId();
    }
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
    // Null when no system-assigned identity is configured on the account.
    if (this.innerModel().identity() == null) {
        return null;
    } else {
        return this.innerModel().identity().principalId();
    }
}
// ---- Network-rule and feature-flag accessors; logic delegated to helpers. ----
@Override
public boolean isAccessAllowedFromAllNetworks() {
    return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
    return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
    return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
    return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
    return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
    return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
    return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
    // True only when identity-based auth is configured with the AADDS directory service.
    return this.innerModel().azureFilesIdentityBasedAuthentication() != null
        && this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
            == DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
    // Null-safe unboxing: absent flag is treated as false.
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
    return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
    return this.innerModel().minimumTlsVersion();
}
// The following flags default to true when the service omits them from the
// inner model (a null Boolean), matching the other boolean accessors here.
@Override
public boolean isHttpsTrafficOnly() {
    Boolean httpsOnly = this.innerModel().enableHttpsTrafficOnly();
    return httpsOnly == null || httpsOnly;
}
@Override
public boolean isBlobPublicAccessAllowed() {
    Boolean blobPublicAccess = this.innerModel().allowBlobPublicAccess();
    return blobPublicAccess == null || blobPublicAccess;
}
@Override
public boolean isSharedKeyAccessAllowed() {
    Boolean sharedKeyAccess = this.innerModel().allowSharedKeyAccess();
    return sharedKeyAccess == null || sharedKeyAccess;
}
@Override
public boolean isAllowCrossTenantReplication() {
    // NOTE(review): a null inner value is reported as true, mirroring the
    // isBlobPublicAccessAllowed/isSharedKeyAccessAllowed convention above —
    // confirm against the service-side default for allowCrossTenantReplication.
    if (this.innerModel().allowCrossTenantReplication() == null) {
        return true;
    }
    return this.innerModel().allowCrossTenantReplication();
}
// Defaults to true when the flag is absent from the inner model.
@Override
public boolean isDefaultToOAuthAuthentication() {
    if (this.innerModel().defaultToOAuthAuthentication() == null) {
        return true;
    }
    return this.innerModel().defaultToOAuthAuthentication();
}
// Blocking wrapper over getKeysAsync().
@Override
public List<StorageAccountKey> getKeys() {
    return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .listKeysAsync(this.resourceGroupName(), this.name())
        .map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
// Blocking wrapper over regenerateKeyAsync(keyName).
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
    return this.regenerateKeyAsync(keyName).block();
}
// Regenerates the named key and returns the full (post-regeneration) key list.
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .regenerateKeyAsync(this.resourceGroupName(), this.name(),
            new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
        .map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
// Blocking wrapper over listPrivateLinkResourcesAsync().
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
    return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
    // The service returns a single (non-paged) list; wrap each inner resource
    // and adapt the whole response into a one-page PagedFlux.
    Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
        .listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
        .map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
            .map(PrivateLinkResourceImpl::new)
            .collect(Collectors.toList())));
    return PagedConverter.convertListToPagedFlux(retList);
}
// Blocking wrapper over listPrivateEndpointConnectionsAsync().
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
    return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
    return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
        .listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
// Blocking wrapper over approvePrivateEndpointConnectionAsync(name).
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
    approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
// Approves the named private endpoint connection.
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
    return updatePrivateEndpointConnectionStatusAsync(privateEndpointConnectionName,
        PrivateEndpointServiceConnectionStatus.APPROVED);
}
// Blocking wrapper over rejectPrivateEndpointConnectionAsync(name).
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
    rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
// Rejects the named private endpoint connection.
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
    return updatePrivateEndpointConnectionStatusAsync(privateEndpointConnectionName,
        PrivateEndpointServiceConnectionStatus.REJECTED);
}
// Shared PUT that transitions a private endpoint connection to the given status;
// approve/reject differ only in the target status value.
private Mono<Void> updatePrivateEndpointConnectionStatusAsync(String privateEndpointConnectionName,
    PrivateEndpointServiceConnectionStatus status) {
    return this.manager().serviceClient().getPrivateEndpointConnections()
        .putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
            new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
                new PrivateLinkServiceConnectionState()
                    .withStatus(status)))
        .then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
    // After re-fetching the inner model, drop the cached wrappers so they are
    // rebuilt from the fresh data on next access.
    return super
        .refreshAsync()
        .map(
            storageAccount -> {
                StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
                impl.clearWrapperProperties();
                return impl;
            });
}
// Fetches the latest inner model from the service.
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
    return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
// ---- Fluent setters. Create-mode changes go to createParameters, update-mode
// ---- changes to updateParameters; encryption changes route through the helper.
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
    if (isInCreateMode()) {
        createParameters.withSku(new Sku().withName(sku.name()));
    } else {
        updateParameters.withSku(new Sku().withName(sku.name()));
    }
    return this;
}
// Account kind can only be chosen at creation time.
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
    createParameters.withKind(Kind.BLOB_STORAGE);
    return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
    createParameters.withKind(Kind.STORAGE);
    return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
    createParameters.withKind(Kind.STORAGE_V2);
    return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
    createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
    return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
    createParameters.withKind(Kind.FILE_STORAGE);
    return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
    this.encryptionHelper.withInfrastructureEncryption();
    return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
    this.encryptionHelper.withBlobEncryption();
    return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
    this.encryptionHelper.withFileEncryption();
    return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
    this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
    return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
    this.encryptionHelper.withoutBlobEncryption();
    return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
    this.encryptionHelper.withoutFileEncryption();
    return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
    this.encryptionHelper.withTableEncryption();
    return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
    this.encryptionHelper.withQueueEncryption();
    return this;
}
// Drops cached wrapper objects so they are rebuilt from a refreshed inner model.
private void clearWrapperProperties() {
    accountStatuses = null;
    publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
    // Switch to update mode: discard create parameters and rebind the helpers
    // to a fresh update-parameters object seeded from the current inner model.
    createParameters = null;
    updateParameters = new StorageAccountUpdateParameters();
    this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
    this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
    return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
    if (isInCreateMode()) {
        createParameters.withCustomDomain(customDomain);
    } else {
        updateParameters.withCustomDomain(customDomain);
    }
    return this;
}
// Convenience overloads that build the CustomDomain payload.
@Override
public StorageAccountImpl withCustomDomain(String name) {
    return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
    return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
    if (isInCreateMode()) {
        createParameters.withAccessTier(accessTier);
    } else {
        // Updating the access tier is only supported for BlobStorage-kind accounts.
        if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
            throw logger.logExceptionAsError(new UnsupportedOperationException(
                "Access tier can not be changed for general purpose storage accounts."));
        }
        updateParameters.withAccessTier(accessTier);
    }
    return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
    // NOTE(review): this is a no-op when an identity already exists on the inner
    // model — presumably to avoid clobbering an existing assignment; confirm that
    // is the intended behavior for non-system identity types.
    if (this.innerModel().identity() == null) {
        if (isInCreateMode()) {
            createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
        } else {
            updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
        }
    }
    return this;
}
// Require HTTPS for all traffic to the account.
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(true);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(true);
    }
    return this;
}
// Allow both HTTP and HTTPS traffic.
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
    if (isInCreateMode()) {
        createParameters.withEnableHttpsTrafficOnly(false);
    } else {
        updateParameters.withEnableHttpsTrafficOnly(false);
    }
    return this;
}
// ---- Boolean/enum account-level toggles; each writes to the parameters object
// ---- appropriate for the current (create vs. update) mode.
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
    if (isInCreateMode()) {
        createParameters.withMinimumTlsVersion(minimumTlsVersion);
    } else {
        updateParameters.withMinimumTlsVersion(minimumTlsVersion);
    }
    return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
    if (isInCreateMode()) {
        createParameters.withAllowBlobPublicAccess(true);
    } else {
        updateParameters.withAllowBlobPublicAccess(true);
    }
    return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
    if (isInCreateMode()) {
        createParameters.withAllowBlobPublicAccess(false);
    } else {
        updateParameters.withAllowBlobPublicAccess(false);
    }
    return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
    if (isInCreateMode()) {
        createParameters.withAllowSharedKeyAccess(true);
    } else {
        updateParameters.withAllowSharedKeyAccess(true);
    }
    return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
    if (isInCreateMode()) {
        createParameters.withAllowSharedKeyAccess(false);
    } else {
        updateParameters.withAllowSharedKeyAccess(false);
    }
    return this;
}
@Override
public StorageAccountImpl withAllowCrossTenantReplication(boolean enabled) {
    if (isInCreateMode()) {
        createParameters.withAllowCrossTenantReplication(enabled);
    } else {
        updateParameters.withAllowCrossTenantReplication(enabled);
    }
    return this;
}
@Override
public StorageAccountImpl withDefaultToOAuthAuthentication(boolean enabled) {
    if (isInCreateMode()) {
        createParameters.withDefaultToOAuthAuthentication(enabled);
    } else {
        updateParameters.withDefaultToOAuthAuthentication(enabled);
    }
    return this;
}
// ---- Network-rule mutators; all state is accumulated in networkRulesHelper and
// ---- applied to the active create/update parameters when the account is saved.
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
    this.networkRulesHelper.withAccessFromAllNetworks();
    return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
    this.networkRulesHelper.withAccessFromSelectedNetworks();
    return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
    this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
    return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
    this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
    return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
    this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
    return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
    this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
    return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
    this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
    return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
    this.networkRulesHelper.withAccessAllowedFromAzureServices();
    return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
    this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
    return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
    this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
    return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
    this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
    return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
    this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
    return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
    this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
    return this;
}
@Override
public Update withoutAccessFromAzureServices() {
    this.networkRulesHelper.withoutAccessFromAzureServices();
    return this;
}
// Kind upgrade (Storage/BlobStorage -> StorageV2) is an update-only operation.
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
    updateParameters.withKind(Kind.STORAGE_V2);
    return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
    // Finalize the create payload before sending it to the service.
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    // Create, then re-read the account so the returned model reflects server-side state,
    // and drop cached wrapper views that were derived from the stale inner model.
    return client
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        .flatMap(ignored -> client.getByResourceGroupAsync(resourceGroupName(), this.name()))
        .map(innerToFluentMap(this))
        .doOnNext(account -> clearWrapperProperties());
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    if (isInCreateMode()) {
        // On create, only an explicit enable needs a payload entry; the service
        // default (no identity-based auth) already covers the disabled case.
        if (enabled) {
            this
                .createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUG FIX: this branch previously checked and populated 'createParameters',
        // which update() sets to null when entering update mode -> NullPointerException.
        // In update mode all state must go through 'updateParameters'.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this
                .updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this
            .updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(
                enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
    // Large-file-shares support can only be chosen at account creation time;
    // calls made in update mode are intentionally ignored, matching the original behavior.
    if (isInCreateMode()) {
        LargeFileSharesState state =
            enabled ? LargeFileSharesState.ENABLED : LargeFileSharesState.DISABLED;
        this.createParameters.withLargeFileSharesState(state);
    }
    return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
/**
 * Read-only adapter exposing a storage-model {@code PrivateLinkResource} through the
 * fluent-core {@code PrivateLinkResource} interface. Holds no state besides the wrapped model.
 */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
// Wrapped in an unmodifiable view so callers cannot mutate the inner model's list.
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
// Same read-only protection as requiredMemberNames().
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
/**
 * Immutable adapter exposing a {@code PrivateEndpointConnectionInner} through the
 * fluent-core {@code PrivateEndpointConnection} interface. All derived fields are
 * computed eagerly in the constructor; any of them may be null when the
 * corresponding inner-model section is absent.
 */
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
// Each conversion below guards against a missing inner section and maps null -> null
// rather than throwing, since the service may omit any of these in responses.
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
// Translate the storage-model connection state into the fluent-core equivalent;
// the nested status enum is converted via its string form and may itself be null.
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
// Provisioning state is likewise re-parsed from its string form into the fluent-core enum.
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
super(name, innerModel, storageManager);
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
    // BUG FIX: the annotation was duplicated ('@Override @Override'), which is a
    // compile error because @Override is not a repeatable annotation.
    // Service may omit the flag on older accounts; toPrimitiveBoolean maps null -> false.
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
// Entering update mode: the create payload is discarded, so every code path that can
// run after this point MUST write to 'updateParameters' — 'createParameters' is null here.
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
// Helpers are rebuilt around the update payload, seeded with the current inner-model state
// so they can diff against what the service already has.
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
// Only BlobStorage accounts may change tier through this fluent API.
// NOTE(review): StorageV2 accounts also expose an access tier on the service side —
// confirm whether this guard is intentionally stricter than the REST API allows.
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
if (this.innerModel().identity() == null) {
if (isInCreateMode()) {
createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
} else {
updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
}
}
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    if (isInCreateMode()) {
        // On create, only an explicit enable needs a payload entry; the service
        // default (no identity-based auth) already covers the disabled case.
        if (enabled) {
            this
                .createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUG FIX: this branch previously checked and populated 'createParameters',
        // which update() sets to null when entering update mode -> NullPointerException.
        // In update mode all state must go through 'updateParameters'.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this
                .updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this
            .updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(
                enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
Yeah, please check Portal or CLI. If they don't show it, it would be safer to just use `Boolean` and allow null. Same for the other one. | public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | return true; | public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
super(name, innerModel, storageManager);
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
    // No system-assigned identity configured means there is no principal id to report.
    Identity identity = this.innerModel().identity();
    return (identity == null) ? null : identity.principalId();
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
    // An unset flag is reported as HTTPS-only enabled (same default the original used).
    Boolean httpsOnly = this.innerModel().enableHttpsTrafficOnly();
    return httpsOnly == null || httpsOnly;
}
@Override
public boolean isBlobPublicAccessAllowed() {
    // An unset flag is reported as blob public access allowed (same default as before).
    Boolean allowed = this.innerModel().allowBlobPublicAccess();
    return allowed == null || allowed;
}
@Override
public boolean isSharedKeyAccessAllowed() {
    // An unset flag is reported as shared-key access allowed (same default as before).
    Boolean allowed = this.innerModel().allowSharedKeyAccess();
    return allowed == null || allowed;
}
@Override
public boolean isDefaultToOAuthAuthentication() {
    // Fix 1: the annotation was duplicated (`@Override @Override`), which does not compile —
    //         repeatable annotations require @Repeatable, which Override is not.
    // Fix 2: a null 'defaultToOAuthAuthentication' means the property was never set; the
    //         service-side default is false, so null must map to false, not true.
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
// Drops the cached wrapper objects so that accountStatuses() and endPoints()
// rebuild them from the (refreshed) inner model on next access.
private void clearWrapperProperties() {
    accountStatuses = null;
    publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
    // Switch this fluent model into update mode: the create payload is dropped so
    // subsequent isInCreateMode() branches write to updateParameters instead.
    // NOTE: after this point 'createParameters' is null — update-mode code paths
    // must never dereference it.
    createParameters = null;
    updateParameters = new StorageAccountUpdateParameters();
    // Rebind both helpers to the update payload, seeded from the current inner model.
    this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
    this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
    return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
if (this.innerModel().identity() == null) {
if (isInCreateMode()) {
createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
} else {
updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
}
}
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
    // Guard-clause form: write to the create payload in create mode, otherwise to the
    // update payload. Same routing as the original if/else.
    if (isInCreateMode()) {
        createParameters.withMinimumTlsVersion(minimumTlsVersion);
        return this;
    }
    updateParameters.withMinimumTlsVersion(minimumTlsVersion);
    return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl withAllowCrossTenantReplication(boolean enabled) {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(enabled);
} else {
updateParameters.withAllowCrossTenantReplication(enabled);
}
return this;
}
@Override
public StorageAccountImpl withDefaultToOAuthAuthentication(boolean enabled) {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(enabled);
} else {
updateParameters.withDefaultToOAuthAuthentication(enabled);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
    // Finalize payload defaults before issuing the create call.
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        // The create response is discarded and the account is re-fetched — presumably
        // because the create response may be incomplete (TODO confirm); the GET result
        // is mapped into this wrapper and the cached views are invalidated.
        .flatMap(
            storageAccountInner ->
                client
                    .getByResourceGroupAsync(resourceGroupName(), this.name())
                    .map(innerToFluentMap(this))
                    .doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
    // Finalize network-rule defaults, carry over the current tags, then PATCH the
    // account; the response is mapped back into this wrapper and cached views dropped.
    this.networkRulesHelper.setDefaultActionIfRequired();
    updateParameters.withTags(this.innerModel().tags());
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .updateAsync(resourceGroupName(), this.name(), updateParameters)
        .map(innerToFluentMap(this))
        .doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Enables/disables Azure Files AAD DS integration on the create or update payload.
    if (isInCreateMode()) {
        // In create mode the service default is "no AAD integration", so only an
        // explicit enable needs to be written.
        if (enabled) {
            this
                .createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // BUG FIX: the original checked and initialized 'createParameters' here, but
        // update() sets createParameters to null, so this path always threw NPE in
        // update mode. The null check and initialization must target updateParameters —
        // the object whose directory-service option is mutated just below.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this
                .updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this
            .updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
// Read-only adapter exposing the service's private-link resource model through the
// fluent PrivateLinkResource interface. Static nested class: holds no reference to
// the enclosing StorageAccountImpl.
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
    private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
        this.innerModel = innerModel;
    }
    @Override
    public String groupId() {
        return innerModel.groupId();
    }
    @Override
    public List<String> requiredMemberNames() {
        // NOTE(review): unmodifiableList throws NullPointerException when
        // requiredMembers() is null — assumes the service always populates it; confirm.
        return Collections.unmodifiableList(innerModel.requiredMembers());
    }
    @Override
    public List<String> requiredDnsZoneNames() {
        // NOTE(review): same null assumption as requiredMemberNames().
        return Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
// Read-only adapter converting the inner PrivateEndpointConnectionInner into the
// fluent-core PrivateEndpointConnection view. All conversions happen eagerly in the
// constructor, each guarded against a null source field.
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
    private final PrivateEndpointConnectionInner innerModel;
    private final PrivateEndpoint privateEndpoint;
    private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState;
    private final PrivateEndpointConnectionProvisioningState provisioningState;
    private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
        this.innerModel = innerModel;
        // Null-safe wrap of the private endpoint reference (id only).
        this.privateEndpoint = innerModel.privateEndpoint() == null
            ? null
            : new PrivateEndpoint(innerModel.privateEndpoint().id());
        // Convert the storage-specific connection state to the fluent-core type; the
        // status enum is bridged via its string value, itself null-guarded.
        this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
            ? null
            : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                innerModel.privateLinkServiceConnectionState().status() == null
                    ? null
                    : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                        .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                innerModel.privateLinkServiceConnectionState().description(),
                innerModel.privateLinkServiceConnectionState().actionRequired());
        // Provisioning state is likewise bridged through its string representation.
        this.provisioningState = innerModel.provisioningState() == null
            ? null
            : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
    }
    @Override
    public String id() {
        return innerModel.id();
    }
    @Override
    public String name() {
        return innerModel.name();
    }
    @Override
    public String type() {
        return innerModel.type();
    }
    @Override
    public PrivateEndpoint privateEndpoint() {
        return privateEndpoint;
    }
    @Override
    public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState() {
        return privateLinkServiceConnectionState;
    }
    @Override
    public PrivateEndpointConnectionProvisioningState provisioningState() {
        return provisioningState;
    }
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
super(name, innerModel, storageManager);
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
    // Fix: the @Override annotation was duplicated on consecutive lines, which does not
    // compile (Override is not @Repeatable). A null inner value maps to false, the
    // service-side default for 'defaultToOAuthAuthentication'.
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
if (this.innerModel().identity() == null) {
if (isInCreateMode()) {
createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
} else {
updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
}
}
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
    // Finalize payload pieces that are only known at send time.
    this.networkRulesHelper.setDefaultActionIfRequired();
    createParameters.withLocation(this.regionName());
    createParameters.withTags(this.innerModel().tags());
    final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .createAsync(this.resourceGroupName(), this.name(), createParameters)
        // After creation completes, re-GET the account and map the fresh inner
        // model into this fluent wrapper — presumably because the create
        // response is not fully populated; TODO confirm against the service.
        .flatMap(
            storageAccountInner ->
                client
                    .getByResourceGroupAsync(resourceGroupName(), this.name())
                    .map(innerToFluentMap(this))
                    // Drop cached derived views so they are rebuilt lazily.
                    .doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
    // Finalize network-rule defaults and tags, then PATCH the account.
    this.networkRulesHelper.setDefaultActionIfRequired();
    updateParameters.withTags(this.innerModel().tags());
    return this
        .manager()
        .serviceClient()
        .getStorageAccounts()
        .updateAsync(resourceGroupName(), this.name(), updateParameters)
        // Map the returned inner model back into this wrapper and invalidate
        // cached derived views (endpoints/statuses) so they are recomputed.
        .map(innerToFluentMap(this))
        .doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Enables/disables Azure Files AAD DS based authentication on the pending
    // create or update payload.
    if (isInCreateMode()) {
        // On create, only an explicit enable needs a payload entry; the
        // service default is "not configured".
        if (enabled) {
            this.createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // Bug fix: the update branch previously inspected and populated
        // createParameters, which update() sets to null — a guaranteed
        // NullPointerException in update mode. The authentication settings
        // must live on updateParameters.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this.updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this.updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(
                enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
    // Stages the large-file-shares state on the create payload.
    // NOTE(review): in update mode this method silently does nothing —
    // presumably the setting is create-only; confirm against the service API.
    if (isInCreateMode()) {
        if (enabled) {
            this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
        } else {
            this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
        }
    }
    return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
    // Stages hierarchical namespace (HNS) on the create payload only.
    // NOTE(review): createParameters is null in update mode, so calling this
    // on an existing account would NPE — presumably the fluent interface only
    // exposes it at definition time; confirm.
    this.createParameters.withIsHnsEnabled(enabled);
    return this;
}
/** Immutable adapter exposing a storage {@code PrivateLinkResource} inner model
 * through the fluent {@code PrivateLinkResource} interface. */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
    private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
    private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
        this.innerModel = innerModel;
    }
    @Override
    public String groupId() {
        return innerModel.groupId();
    }
    @Override
    public List<String> requiredMemberNames() {
        // Wrapped unmodifiable so callers cannot mutate the inner model's list.
        return Collections.unmodifiableList(innerModel.requiredMembers());
    }
    @Override
    public List<String> requiredDnsZoneNames() {
        return Collections.unmodifiableList(innerModel.requiredZoneNames());
    }
}
/** Immutable adapter mapping a {@code PrivateEndpointConnectionInner} into the
 * resource-manager-level {@code PrivateEndpointConnection} contract. All
 * derived fields are converted eagerly in the constructor, with null inner
 * values propagated as null rather than throwing. */
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
    private final PrivateEndpointConnectionInner innerModel;
    private final PrivateEndpoint privateEndpoint;
    private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState;
    private final PrivateEndpointConnectionProvisioningState provisioningState;
    private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
        this.innerModel = innerModel;
        // Null-safe conversion: absent endpoint stays null.
        this.privateEndpoint = innerModel.privateEndpoint() == null
            ? null
            : new PrivateEndpoint(innerModel.privateEndpoint().id());
        // Convert the storage-model connection state into the fluent-core type;
        // the status enum is bridged via its string form.
        this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
            ? null
            : new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                innerModel.privateLinkServiceConnectionState().status() == null
                    ? null
                    : com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                        .fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
                innerModel.privateLinkServiceConnectionState().description(),
                innerModel.privateLinkServiceConnectionState().actionRequired());
        this.provisioningState = innerModel.provisioningState() == null
            ? null
            : PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
    }
    @Override
    public String id() {
        return innerModel.id();
    }
    @Override
    public String name() {
        return innerModel.name();
    }
    @Override
    public String type() {
        return innerModel.type();
    }
    @Override
    public PrivateEndpoint privateEndpoint() {
        return privateEndpoint;
    }
    @Override
    public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState() {
        return privateLinkServiceConnectionState;
    }
    @Override
    public PrivateEndpointConnectionProvisioningState provisioningState() {
        return provisioningState;
    }
}
} |
Now fixed to `return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().allowCrossTenantReplication());`. The other getter received the same fix.
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | return true; | public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
super(name, innerModel, storageManager);
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
public boolean isDefaultToOAuthAuthentication() {
    // Fix 1: a duplicated @Override annotation was removed — repeating a
    // non-repeatable annotation is a compile error in Java.
    // Fix 2: treat an unset flag as false via the shared null-to-primitive
    // helper (already used by isHnsEnabled in this class); returning true for
    // a null inner value misreported accounts that never set the property.
    return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
if (this.innerModel().identity() == null) {
if (isInCreateMode()) {
createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
} else {
updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
}
}
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl withAllowCrossTenantReplication(boolean enabled) {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(enabled);
} else {
updateParameters.withAllowCrossTenantReplication(enabled);
}
return this;
}
@Override
public StorageAccountImpl withDefaultToOAuthAuthentication(boolean enabled) {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(enabled);
} else {
updateParameters.withDefaultToOAuthAuthentication(enabled);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
    // Enables/disables Azure Files AAD DS based authentication on the pending
    // create or update payload.
    if (isInCreateMode()) {
        // On create, only an explicit enable needs a payload entry; the
        // service default is "not configured".
        if (enabled) {
            this.createParameters
                .withAzureFilesIdentityBasedAuthentication(
                    new AzureFilesIdentityBasedAuthentication()
                        .withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
        }
    } else {
        // Bug fix: the update branch previously inspected and populated
        // createParameters, which update() sets to null — a guaranteed
        // NullPointerException in update mode. The authentication settings
        // must live on updateParameters.
        if (this.updateParameters.azureFilesIdentityBasedAuthentication() == null) {
            this.updateParameters
                .withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
        }
        this.updateParameters
            .azureFilesIdentityBasedAuthentication()
            .withDirectoryServiceOptions(
                enabled ? DirectoryServiceOptions.AADDS : DirectoryServiceOptions.NONE);
    }
    return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
super(name, innerModel, storageManager);
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
if (this.innerModel().identity() == null) {
if (isInCreateMode()) {
createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
} else {
updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
}
}
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} |
Could you confirm at default (when send `null`), it is true or false? | public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | return true; | public boolean isAllowCrossTenantReplication() {
if (this.innerModel().allowCrossTenantReplication() == null) {
return true;
}
return this.innerModel().allowCrossTenantReplication();
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
super(name, innerModel, storageManager);
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
@Override
public boolean isDefaultToOAuthAuthentication() {
if (this.innerModel().defaultToOAuthAuthentication() == null) {
return true;
}
return this.innerModel().defaultToOAuthAuthentication();
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
if (this.innerModel().identity() == null) {
if (isInCreateMode()) {
createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
} else {
updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
}
}
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl withAllowCrossTenantReplication(boolean enabled) {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(enabled);
} else {
updateParameters.withAllowCrossTenantReplication(enabled);
}
return this;
}
@Override
public StorageAccountImpl withDefaultToOAuthAuthentication(boolean enabled) {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(enabled);
} else {
updateParameters.withDefaultToOAuthAuthentication(enabled);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
private final PrivateEndpointConnectionInner innerModel;
private final PrivateEndpoint privateEndpoint;
private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState;
private final PrivateEndpointConnectionProvisioningState provisioningState;
private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
this.innerModel = innerModel;
this.privateEndpoint = innerModel.privateEndpoint() == null
? null
: new PrivateEndpoint(innerModel.privateEndpoint().id());
this.privateLinkServiceConnectionState = innerModel.privateLinkServiceConnectionState() == null
? null
: new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
innerModel.privateLinkServiceConnectionState().status() == null
? null
: com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
.fromString(innerModel.privateLinkServiceConnectionState().status().toString()),
innerModel.privateLinkServiceConnectionState().description(),
innerModel.privateLinkServiceConnectionState().actionRequired());
this.provisioningState = innerModel.provisioningState() == null
? null
: PrivateEndpointConnectionProvisioningState.fromString(innerModel.provisioningState().toString());
}
@Override
public String id() {
return innerModel.id();
}
@Override
public String name() {
return innerModel.name();
}
@Override
public String type() {
return innerModel.type();
}
@Override
public PrivateEndpoint privateEndpoint() {
return privateEndpoint;
}
@Override
public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
privateLinkServiceConnectionState() {
return privateLinkServiceConnectionState;
}
@Override
public PrivateEndpointConnectionProvisioningState provisioningState() {
return provisioningState;
}
}
} | class StorageAccountImpl
extends GroupableResourceImpl<StorageAccount, StorageAccountInner, StorageAccountImpl, StorageManager>
implements StorageAccount, StorageAccount.Definition, StorageAccount.Update {
private final ClientLogger logger = new ClientLogger(getClass());
private PublicEndpoints publicEndpoints;
private AccountStatuses accountStatuses;
private StorageAccountCreateParameters createParameters;
private StorageAccountUpdateParameters updateParameters;
private StorageNetworkRulesHelper networkRulesHelper;
private StorageEncryptionHelper encryptionHelper;
StorageAccountImpl(String name, StorageAccountInner innerModel, final StorageManager storageManager) {
super(name, innerModel, storageManager);
this.createParameters = new StorageAccountCreateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.createParameters);
this.encryptionHelper = new StorageEncryptionHelper(this.createParameters);
}
@Override
public AccountStatuses accountStatuses() {
if (accountStatuses == null) {
accountStatuses = new AccountStatuses(this.innerModel().statusOfPrimary(), this.innerModel().statusOfSecondary());
}
return accountStatuses;
}
@Override
public StorageAccountSkuType skuType() {
return StorageAccountSkuType.fromSkuName(this.innerModel().sku().name());
}
@Override
public Kind kind() {
return innerModel().kind();
}
@Override
public OffsetDateTime creationTime() {
return this.innerModel().creationTime();
}
@Override
public CustomDomain customDomain() {
return this.innerModel().customDomain();
}
@Override
public OffsetDateTime lastGeoFailoverTime() {
return this.innerModel().lastGeoFailoverTime();
}
@Override
public ProvisioningState provisioningState() {
return this.innerModel().provisioningState();
}
@Override
public PublicEndpoints endPoints() {
if (publicEndpoints == null) {
publicEndpoints = new PublicEndpoints(this.innerModel().primaryEndpoints(), this.innerModel().secondaryEndpoints());
}
return publicEndpoints;
}
@Override
public StorageAccountEncryptionKeySource encryptionKeySource() {
return StorageEncryptionHelper.encryptionKeySource(this.innerModel());
}
@Override
public Map<StorageService, StorageAccountEncryptionStatus> encryptionStatuses() {
return StorageEncryptionHelper.encryptionStatuses(this.innerModel());
}
@Override
public boolean infrastructureEncryptionEnabled() {
return StorageEncryptionHelper.infrastructureEncryptionEnabled(this.innerModel());
}
@Override
public AccessTier accessTier() {
return innerModel().accessTier();
}
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().tenantId();
}
}
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
if (this.innerModel().identity() == null) {
return null;
} else {
return this.innerModel().identity().principalId();
}
}
@Override
public boolean isAccessAllowedFromAllNetworks() {
return StorageNetworkRulesHelper.isAccessAllowedFromAllNetworks(this.innerModel());
}
@Override
public List<String> networkSubnetsWithAccess() {
return StorageNetworkRulesHelper.networkSubnetsWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressesWithAccess() {
return StorageNetworkRulesHelper.ipAddressesWithAccess(this.innerModel());
}
@Override
public List<String> ipAddressRangesWithAccess() {
return StorageNetworkRulesHelper.ipAddressRangesWithAccess(this.innerModel());
}
@Override
public boolean canReadLogEntriesFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadLogEntriesFromAnyNetwork(this.innerModel());
}
@Override
public boolean canReadMetricsFromAnyNetwork() {
return StorageNetworkRulesHelper.canReadMetricsFromAnyNetwork(this.innerModel());
}
@Override
public boolean canAccessFromAzureServices() {
return StorageNetworkRulesHelper.canAccessFromAzureServices(this.innerModel());
}
@Override
public boolean isAzureFilesAadIntegrationEnabled() {
return this.innerModel().azureFilesIdentityBasedAuthentication() != null
&& this.innerModel().azureFilesIdentityBasedAuthentication().directoryServiceOptions()
== DirectoryServiceOptions.AADDS;
}
@Override
public boolean isHnsEnabled() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().isHnsEnabled());
}
@Override
public boolean isLargeFileSharesEnabled() {
return this.innerModel().largeFileSharesState() == LargeFileSharesState.ENABLED;
}
@Override
public MinimumTlsVersion minimumTlsVersion() {
return this.innerModel().minimumTlsVersion();
}
@Override
public boolean isHttpsTrafficOnly() {
if (this.innerModel().enableHttpsTrafficOnly() == null) {
return true;
}
return this.innerModel().enableHttpsTrafficOnly();
}
@Override
public boolean isBlobPublicAccessAllowed() {
if (this.innerModel().allowBlobPublicAccess() == null) {
return true;
}
return this.innerModel().allowBlobPublicAccess();
}
@Override
public boolean isSharedKeyAccessAllowed() {
if (this.innerModel().allowSharedKeyAccess() == null) {
return true;
}
return this.innerModel().allowSharedKeyAccess();
}
@Override
@Override
public boolean isDefaultToOAuthAuthentication() {
return ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().defaultToOAuthAuthentication());
}
@Override
public List<StorageAccountKey> getKeys() {
return this.getKeysAsync().block();
}
@Override
public Mono<List<StorageAccountKey>> getKeysAsync() {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.listKeysAsync(this.resourceGroupName(), this.name())
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public List<StorageAccountKey> regenerateKey(String keyName) {
return this.regenerateKeyAsync(keyName).block();
}
@Override
public Mono<List<StorageAccountKey>> regenerateKeyAsync(String keyName) {
return this
.manager()
.serviceClient()
.getStorageAccounts()
.regenerateKeyAsync(this.resourceGroupName(), this.name(),
new StorageAccountRegenerateKeyParameters().withKeyName(keyName))
.map(storageAccountListKeysResultInner -> storageAccountListKeysResultInner.keys());
}
@Override
public PagedIterable<PrivateLinkResource> listPrivateLinkResources() {
return new PagedIterable<>(listPrivateLinkResourcesAsync());
}
@Override
public PagedFlux<PrivateLinkResource> listPrivateLinkResourcesAsync() {
Mono<Response<List<PrivateLinkResource>>> retList = this.manager().serviceClient().getPrivateLinkResources()
.listByStorageAccountWithResponseAsync(this.resourceGroupName(), this.name())
.map(response -> new SimpleResponse<>(response, response.getValue().value().stream()
.map(PrivateLinkResourceImpl::new)
.collect(Collectors.toList())));
return PagedConverter.convertListToPagedFlux(retList);
}
@Override
public PagedIterable<PrivateEndpointConnection> listPrivateEndpointConnections() {
return new PagedIterable<>(listPrivateEndpointConnectionsAsync());
}
@Override
public PagedFlux<PrivateEndpointConnection> listPrivateEndpointConnectionsAsync() {
return PagedConverter.mapPage(this.manager().serviceClient().getPrivateEndpointConnections()
.listAsync(this.resourceGroupName(), this.name()), PrivateEndpointConnectionImpl::new);
}
@Override
public void approvePrivateEndpointConnection(String privateEndpointConnectionName) {
approvePrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> approvePrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.APPROVED)))
.then();
}
@Override
public void rejectPrivateEndpointConnection(String privateEndpointConnectionName) {
rejectPrivateEndpointConnectionAsync(privateEndpointConnectionName).block();
}
@Override
public Mono<Void> rejectPrivateEndpointConnectionAsync(String privateEndpointConnectionName) {
return this.manager().serviceClient().getPrivateEndpointConnections()
.putWithResponseAsync(this.resourceGroupName(), this.name(), privateEndpointConnectionName,
new PrivateEndpointConnectionInner().withPrivateLinkServiceConnectionState(
new PrivateLinkServiceConnectionState()
.withStatus(
PrivateEndpointServiceConnectionStatus.REJECTED)))
.then();
}
@Override
public Mono<StorageAccount> refreshAsync() {
return super
.refreshAsync()
.map(
storageAccount -> {
StorageAccountImpl impl = (StorageAccountImpl) storageAccount;
impl.clearWrapperProperties();
return impl;
});
}
@Override
protected Mono<StorageAccountInner> getInnerAsync() {
return this.manager().serviceClient().getStorageAccounts().getByResourceGroupAsync(this.resourceGroupName(), this.name());
}
@Override
public StorageAccountImpl withSku(StorageAccountSkuType sku) {
if (isInCreateMode()) {
createParameters.withSku(new Sku().withName(sku.name()));
} else {
updateParameters.withSku(new Sku().withName(sku.name()));
}
return this;
}
@Override
public StorageAccountImpl withBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKind() {
createParameters.withKind(Kind.STORAGE);
return this;
}
@Override
public StorageAccountImpl withGeneralPurposeAccountKindV2() {
createParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public StorageAccountImpl withBlockBlobStorageAccountKind() {
createParameters.withKind(Kind.BLOCK_BLOB_STORAGE);
return this;
}
@Override
public StorageAccountImpl withFileStorageAccountKind() {
createParameters.withKind(Kind.FILE_STORAGE);
return this;
}
@Override
public StorageAccountImpl withInfrastructureEncryption() {
this.encryptionHelper.withInfrastructureEncryption();
return this;
}
@Override
public StorageAccountImpl withBlobEncryption() {
this.encryptionHelper.withBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withFileEncryption() {
this.encryptionHelper.withFileEncryption();
return this;
}
@Override
public StorageAccountImpl withEncryptionKeyFromKeyVault(String keyVaultUri, String keyName, String keyVersion) {
this.encryptionHelper.withEncryptionKeyFromKeyVault(keyVaultUri, keyName, keyVersion);
return this;
}
@Override
public StorageAccountImpl withoutBlobEncryption() {
this.encryptionHelper.withoutBlobEncryption();
return this;
}
@Override
public StorageAccountImpl withoutFileEncryption() {
this.encryptionHelper.withoutFileEncryption();
return this;
}
@Override
public StorageAccountImpl withTableAccountScopedEncryptionKey() {
this.encryptionHelper.withTableEncryption();
return this;
}
@Override
public StorageAccountImpl withQueueAccountScopedEncryptionKey() {
this.encryptionHelper.withQueueEncryption();
return this;
}
private void clearWrapperProperties() {
accountStatuses = null;
publicEndpoints = null;
}
@Override
public StorageAccountImpl update() {
createParameters = null;
updateParameters = new StorageAccountUpdateParameters();
this.networkRulesHelper = new StorageNetworkRulesHelper(this.updateParameters, this.innerModel());
this.encryptionHelper = new StorageEncryptionHelper(this.updateParameters, this.innerModel());
return super.update();
}
@Override
public StorageAccountImpl withCustomDomain(CustomDomain customDomain) {
if (isInCreateMode()) {
createParameters.withCustomDomain(customDomain);
} else {
updateParameters.withCustomDomain(customDomain);
}
return this;
}
@Override
public StorageAccountImpl withCustomDomain(String name) {
return withCustomDomain(new CustomDomain().withName(name));
}
@Override
public StorageAccountImpl withCustomDomain(String name, boolean useSubDomain) {
return withCustomDomain(new CustomDomain().withName(name).withUseSubDomainName(useSubDomain));
}
@Override
public StorageAccountImpl withAccessTier(AccessTier accessTier) {
if (isInCreateMode()) {
createParameters.withAccessTier(accessTier);
} else {
if (this.innerModel().kind() != Kind.BLOB_STORAGE) {
throw logger.logExceptionAsError(new UnsupportedOperationException(
"Access tier can not be changed for general purpose storage accounts."));
}
updateParameters.withAccessTier(accessTier);
}
return this;
}
@Override
public StorageAccountImpl withSystemAssignedManagedServiceIdentity() {
if (this.innerModel().identity() == null) {
if (isInCreateMode()) {
createParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
} else {
updateParameters.withIdentity(new Identity().withType(IdentityType.SYSTEM_ASSIGNED));
}
}
return this;
}
@Override
public StorageAccountImpl withOnlyHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(true);
} else {
updateParameters.withEnableHttpsTrafficOnly(true);
}
return this;
}
@Override
public StorageAccountImpl withHttpAndHttpsTraffic() {
if (isInCreateMode()) {
createParameters.withEnableHttpsTrafficOnly(false);
} else {
updateParameters.withEnableHttpsTrafficOnly(false);
}
return this;
}
@Override
public StorageAccountImpl withMinimumTlsVersion(MinimumTlsVersion minimumTlsVersion) {
if (isInCreateMode()) {
createParameters.withMinimumTlsVersion(minimumTlsVersion);
} else {
updateParameters.withMinimumTlsVersion(minimumTlsVersion);
}
return this;
}
@Override
public StorageAccountImpl enableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(true);
} else {
updateParameters.withAllowBlobPublicAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableBlobPublicAccess() {
if (isInCreateMode()) {
createParameters.withAllowBlobPublicAccess(false);
} else {
updateParameters.withAllowBlobPublicAccess(false);
}
return this;
}
@Override
public StorageAccountImpl enableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(true);
} else {
updateParameters.withAllowSharedKeyAccess(true);
}
return this;
}
@Override
public StorageAccountImpl disableSharedKeyAccess() {
if (isInCreateMode()) {
createParameters.withAllowSharedKeyAccess(false);
} else {
updateParameters.withAllowSharedKeyAccess(false);
}
return this;
}
@Override
public StorageAccountImpl allowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(true);
} else {
updateParameters.withAllowCrossTenantReplication(true);
}
return this;
}
@Override
public StorageAccountImpl disallowCrossTenantReplication() {
if (isInCreateMode()) {
createParameters.withAllowCrossTenantReplication(false);
} else {
updateParameters.withAllowCrossTenantReplication(false);
}
return this;
}
@Override
public StorageAccountImpl enableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(true);
} else {
updateParameters.withDefaultToOAuthAuthentication(true);
}
return this;
}
@Override
public StorageAccountImpl disableDefaultToOAuthAuthentication() {
if (isInCreateMode()) {
createParameters.withDefaultToOAuthAuthentication(false);
} else {
updateParameters.withDefaultToOAuthAuthentication(false);
}
return this;
}
@Override
public StorageAccountImpl withAccessFromAllNetworks() {
this.networkRulesHelper.withAccessFromAllNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromSelectedNetworks() {
this.networkRulesHelper.withAccessFromSelectedNetworks();
return this;
}
@Override
public StorageAccountImpl withAccessFromNetworkSubnet(String subnetId) {
this.networkRulesHelper.withAccessFromNetworkSubnet(subnetId);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddress(String ipAddress) {
this.networkRulesHelper.withAccessFromIpAddress(ipAddress);
return this;
}
@Override
public StorageAccountImpl withAccessFromIpAddressRange(String ipAddressCidr) {
this.networkRulesHelper.withAccessFromIpAddressRange(ipAddressCidr);
return this;
}
@Override
public StorageAccountImpl withReadAccessToLogEntriesFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public StorageAccountImpl withAccessFromAzureServices() {
this.networkRulesHelper.withAccessAllowedFromAzureServices();
return this;
}
@Override
public StorageAccountImpl withoutNetworkSubnetAccess(String subnetId) {
this.networkRulesHelper.withoutNetworkSubnetAccess(subnetId);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressAccess(String ipAddress) {
this.networkRulesHelper.withoutIpAddressAccess(ipAddress);
return this;
}
@Override
public StorageAccountImpl withoutIpAddressRangeAccess(String ipAddressCidr) {
this.networkRulesHelper.withoutIpAddressRangeAccess(ipAddressCidr);
return this;
}
@Override
public Update withoutReadAccessToLoggingFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToLoggingFromAnyNetwork();
return this;
}
@Override
public Update withoutReadAccessToMetricsFromAnyNetwork() {
this.networkRulesHelper.withoutReadAccessToMetricsFromAnyNetwork();
return this;
}
@Override
public Update withoutAccessFromAzureServices() {
this.networkRulesHelper.withoutAccessFromAzureServices();
return this;
}
@Override
public Update upgradeToGeneralPurposeAccountKindV2() {
updateParameters.withKind(Kind.STORAGE_V2);
return this;
}
@Override
public Mono<StorageAccount> createResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
createParameters.withLocation(this.regionName());
createParameters.withTags(this.innerModel().tags());
final StorageAccountsClient client = this.manager().serviceClient().getStorageAccounts();
return this
.manager()
.serviceClient()
.getStorageAccounts()
.createAsync(this.resourceGroupName(), this.name(), createParameters)
.flatMap(
storageAccountInner ->
client
.getByResourceGroupAsync(resourceGroupName(), this.name())
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties()));
}
@Override
public Mono<StorageAccount> updateResourceAsync() {
this.networkRulesHelper.setDefaultActionIfRequired();
updateParameters.withTags(this.innerModel().tags());
return this
.manager()
.serviceClient()
.getStorageAccounts()
.updateAsync(resourceGroupName(), this.name(), updateParameters)
.map(innerToFluentMap(this))
.doOnNext(storageAccount -> clearWrapperProperties());
}
@Override
public StorageAccountImpl withAzureFilesAadIntegrationEnabled(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(
new AzureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS));
}
} else {
if (this.createParameters.azureFilesIdentityBasedAuthentication() == null) {
this
.createParameters
.withAzureFilesIdentityBasedAuthentication(new AzureFilesIdentityBasedAuthentication());
}
if (enabled) {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.AADDS);
} else {
this
.updateParameters
.azureFilesIdentityBasedAuthentication()
.withDirectoryServiceOptions(DirectoryServiceOptions.NONE);
}
}
return this;
}
@Override
public StorageAccountImpl withLargeFileShares(boolean enabled) {
if (isInCreateMode()) {
if (enabled) {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.ENABLED);
} else {
this.createParameters.withLargeFileSharesState(LargeFileSharesState.DISABLED);
}
}
return this;
}
@Override
public StorageAccountImpl withHnsEnabled(boolean enabled) {
this.createParameters.withIsHnsEnabled(enabled);
return this;
}
/**
 * Immutable adapter exposing the inner {@code PrivateLinkResource} model through the
 * fluent-interface {@code PrivateLinkResource} contract.
 */
private static final class PrivateLinkResourceImpl implements PrivateLinkResource {
private final com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel;
private PrivateLinkResourceImpl(com.azure.resourcemanager.storage.models.PrivateLinkResource innerModel) {
this.innerModel = innerModel;
}
@Override
public String groupId() {
return innerModel.groupId();
}
// NOTE(review): unmodifiableList throws NPE if the inner list is null — assumes the
// service always populates requiredMembers/requiredZoneNames; confirm.
@Override
public List<String> requiredMemberNames() {
return Collections.unmodifiableList(innerModel.requiredMembers());
}
@Override
public List<String> requiredDnsZoneNames() {
return Collections.unmodifiableList(innerModel.requiredZoneNames());
}
}
/**
 * Immutable adapter translating a {@code PrivateEndpointConnectionInner} into the
 * fluent-interface {@code PrivateEndpointConnection} contract. All derived state is
 * computed once in the constructor; null inner fields map to null outputs.
 */
private static final class PrivateEndpointConnectionImpl implements PrivateEndpointConnection {
    private final PrivateEndpointConnectionInner innerModel;
    private final PrivateEndpoint privateEndpoint;
    private final com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState;
    private final PrivateEndpointConnectionProvisioningState provisioningState;

    private PrivateEndpointConnectionImpl(PrivateEndpointConnectionInner innerModel) {
        this.innerModel = innerModel;

        // Private endpoint reference: carry over only the resource id, if present.
        PrivateEndpoint endpoint = null;
        if (innerModel.privateEndpoint() != null) {
            endpoint = new PrivateEndpoint(innerModel.privateEndpoint().id());
        }
        this.privateEndpoint = endpoint;

        // Connection state: translate status enum (by string round-trip), description
        // and action-required flag, each tolerating a null source.
        com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState state = null;
        if (innerModel.privateLinkServiceConnectionState() != null) {
            com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateEndpointServiceConnectionStatus
                status = null;
            if (innerModel.privateLinkServiceConnectionState().status() != null) {
                status = com.azure.resourcemanager.resources.fluentcore.arm.models
                    .PrivateEndpointServiceConnectionStatus
                    .fromString(innerModel.privateLinkServiceConnectionState().status().toString());
            }
            state = new com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState(
                status,
                innerModel.privateLinkServiceConnectionState().description(),
                innerModel.privateLinkServiceConnectionState().actionRequired());
        }
        this.privateLinkServiceConnectionState = state;

        // Provisioning state: enum translated by string round-trip, null-safe.
        PrivateEndpointConnectionProvisioningState provisioning = null;
        if (innerModel.provisioningState() != null) {
            provisioning = PrivateEndpointConnectionProvisioningState
                .fromString(innerModel.provisioningState().toString());
        }
        this.provisioningState = provisioning;
    }

    @Override
    public String id() {
        return innerModel.id();
    }

    @Override
    public String name() {
        return innerModel.name();
    }

    @Override
    public String type() {
        return innerModel.type();
    }

    @Override
    public PrivateEndpoint privateEndpoint() {
        return privateEndpoint;
    }

    @Override
    public com.azure.resourcemanager.resources.fluentcore.arm.models.PrivateLinkServiceConnectionState
        privateLinkServiceConnectionState() {
        return privateLinkServiceConnectionState;
    }

    @Override
    public PrivateEndpointConnectionProvisioningState provisioningState() {
        return provisioningState;
    }
}
} |
Have we run these tests with Fiddler enabled to verify this request's x-ms-file-request-intent and x-ms-copy-source-authorization headers are populated? | public void uploadRangeFromURLOAuth() {
ShareServiceAsyncClient oAuthServiceClient = getOAuthServiceClientAsyncSharedKey(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryAsyncClient dirClient = oAuthServiceClient.getShareAsyncClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create().block();
String fileName = generatePathName();
ShareFileAsyncClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
int sourceOffset = 5;
int length = 5;
int destinationOffset = 0;
fileClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(fileClient.getShareName())
.setFilePath(fileClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
String fileNameDest = generatePathName();
ShareFileAsyncClient fileClientDest = dirClient.getFileClient(fileNameDest);
fileClientDest.create(1024).block();
StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length,
destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken))
.assertNext(r -> assertEquals(r.getStatusCode(), 201))
.verifyComplete();
StepVerifier.create(fileClientDest.downloadWithResponse(null)
.flatMap(r -> {
assertTrue(r.getStatusCode() == 200 || r.getStatusCode() == 206);
ShareFileDownloadHeaders headers = r.getDeserializedHeaders();
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> {
assertEquals(bytes[0], 117);
})
.verifyComplete();
} | StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length, | public void uploadRangeFromURLOAuth() {
ShareServiceAsyncClient oAuthServiceClient = getOAuthServiceClientAsyncSharedKey(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryAsyncClient dirClient = oAuthServiceClient.getShareAsyncClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create().block();
String fileName = generatePathName();
ShareFileAsyncClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
int sourceOffset = 5;
int length = 5;
int destinationOffset = 0;
fileClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(fileClient.getShareName())
.setFilePath(fileClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
String fileNameDest = generatePathName();
ShareFileAsyncClient fileClientDest = dirClient.getFileClient(fileNameDest);
fileClientDest.create(1024).block();
StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length,
destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken))
.assertNext(r -> assertEquals(r.getStatusCode(), 201))
.verifyComplete();
StepVerifier.create(fileClientDest.downloadWithResponse(null)
.flatMap(r -> {
assertTrue(r.getStatusCode() == 200 || r.getStatusCode() == 206);
ShareFileDownloadHeaders headers = r.getDeserializedHeaders();
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> {
assertEquals(bytes[0], 117);
})
.verifyComplete();
} | class FileAsyncApiTests extends FileShareTestBase {
private ShareFileAsyncClient primaryFileAsyncClient;
private ShareClient shareClient;
private String shareName;
private String filePath;
private static Map<String, String> testMetadata;
private static ShareFileHttpHeaders httpHeaders;
private FileSmbProperties smbProperties;
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
// Fresh share per test so tests are isolated; the share itself is created eagerly here.
shareName = generateShareName();
filePath = generatePathName();
shareClient = shareBuilderHelper(shareName).buildClient();
shareClient.create();
// The file client is built but the file is NOT created; each test creates it as needed.
primaryFileAsyncClient = fileBuilderHelper(shareName, filePath).buildFileAsyncClient();
// Shared fixtures reused by most tests: metadata, HTTP headers, and SMB properties.
testMetadata = Collections.singletonMap("testmetadata", "value");
httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en")
.setContentType("application/octet-stream");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL));
}
@Test
public void getFileURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
String fileURL = primaryFileAsyncClient.getFileUrl();
assertEquals(expectURL, fileURL);
}
/** Creating a 1 KiB file should succeed with HTTP 201. */
@Test
public void createFile() {
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, null, null, null, null))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 201))
        .verifyComplete();
}
@Test
public void createFileError() {
StepVerifier.create(primaryFileAsyncClient.create(-1)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void createFileWithArgsFpk() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, null,
testMetadata)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
assertNotNull(it.getValue().getLastModified());
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
@Test
public void createFileWithArgsFp() {
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION,
testMetadata)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
assertNotNull(it.getValue().getLastModified());
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
@Test
public void createFileWithArgsError() {
StepVerifier.create(primaryFileAsyncClient.createWithResponse(-1, null, null, null, testMetadata))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.OUT_OF_RANGE_INPUT));
}
/**
 * Re-creating a leased file succeeds when the correct lease id is supplied.
 */
@Test
public void createLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUGFIX: the StepVerifier chain was never terminated (no verifyComplete()), so the
    // publisher was never subscribed and this test previously asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
        null, null, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void createLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
null, null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
/*
* Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
*/
/*
* Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
*/
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@ValueSource(ints = {
0,
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
})
public void downloadFileBufferCopy(int fileSize) throws IOException {
ShareServiceAsyncClient shareServiceAsyncClient = new ShareServiceClientBuilder()
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.buildAsyncClient();
ShareFileAsyncClient fileClient = shareServiceAsyncClient.getShareAsyncClient(shareName)
.createFile(filePath, fileSize).block();
File file = FileShareTestHelper.getRandomFile(fileSize);
assertNotNull(fileClient);
fileClient.uploadFromFile(file.toPath().toString()).block();
File outFile = new File(generatePathName() + ".txt");
if (outFile.exists()) {
assertTrue(outFile.delete());
}
fileClient.downloadToFile(outFile.toPath().toString()).block();
assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
shareServiceAsyncClient.deleteShare(shareName).block();
outFile.delete();
file.delete();
}
/**
 * Round-trip: upload the default payload, then download it and verify both the response
 * headers and the exact body bytes.
 */
@Test
public void uploadAndDownloadData() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // BUGFIX: the body check previously built a FluxUtil...flatMap pipeline inside assertNext
    // without subscribing to it, so the content assertion never executed. The pipeline is now
    // part of the verified chain (same pattern as uploadRangeFromURLOAuth).
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null)
        .flatMap(response -> {
            assertTrue((response.getStatusCode() == 200) || (response.getStatusCode() == 206));
            ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
            assertEquals(DATA.getDefaultDataSizeLong(), headers.getContentLength());
            assertNotNull(headers.getETag());
            assertNotNull(headers.getLastModified());
            assertNotNull(headers.getFilePermissionKey());
            assertNotNull(headers.getFileAttributes());
            assertNotNull(headers.getFileLastWriteTime());
            assertNotNull(headers.getFileCreationTime());
            assertNotNull(headers.getFileChangeTime());
            assertNotNull(headers.getFileParentId());
            assertNotNull(headers.getFileId());
            return FluxUtil.collectBytesInByteBufferStream(response.getValue());
        }))
        .assertNext(actualData -> assertArrayEquals(DATA.getDefaultBytes(), actualData))
        .verifyComplete();
}
/**
 * Round-trip with an explicit offset/range: upload at offset 1, download that range with
 * range-content-MD5 requested, and verify status, length, and exact body bytes.
 */
@Test
public void uploadAndDownloadDataWithArgs() {
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()).setOffset(1L)))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // BUGFIX: the body-bytes pipeline was previously built inside assertNext without being
    // subscribed, so the content assertion never executed; it is now part of the verified chain.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1,
        DATA.getDefaultDataSizeLong()), true)
        .flatMap(it -> {
            FileShareTestHelper.assertResponseStatusCode(it, 206);
            assertEquals(DATA.getDefaultDataSizeLong(), it.getDeserializedHeaders().getContentLength());
            return FluxUtil.collectBytesInByteBufferStream(it.getValue());
        }))
        .assertNext(actualData -> assertArrayEquals(DATA.getDefaultBytes(), actualData))
        .verifyComplete();
}
@Test
public void uploadDataError() {
StepVerifier.create(primaryFileAsyncClient.upload(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
/**
 * Uploading to a leased file succeeds when the correct lease id is supplied.
 */
@Test
public void uploadLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUGFIX: the StepVerifier chain was never terminated (no verifyComplete()), so the
    // publisher was never subscribed and this test previously asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
        DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
        .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId))))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void uploadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
.setRequestConditions(new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid()))))
.verifyError(ShareStorageException.class);
}
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(long size, String errMsg) {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), size).setOffset(0L))).verifyErrorSatisfies(it -> {
assertInstanceOf(UnexpectedLengthException.class, it);
assertTrue(it.getMessage().contains(errMsg));
});
}
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
return Stream.of(
Arguments.of(6, "more than"),
Arguments.of(8, "less than"));
}
@Test
public void downloadDataError() {
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 1023L), false))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
/**
 * Downloading a leased file succeeds when the correct lease id is supplied.
 */
@Test
public void downloadLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUGFIX: the StepVerifier chain was never terminated (no verifyComplete()), so the
    // publisher was never subscribed and this test previously asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
        .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void downloadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
/**
 * Clearing a range should zero the cleared bytes: upload text, clear the first 7 bytes,
 * then download that range and verify every byte is zero.
 */
@Test
public void uploadAndClearRange() {
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 0)).assertNext(it ->
        FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // BUGFIX: the byte check previously built a FluxUtil...flatMap pipeline inside assertNext
    // without subscribing, so the zero-byte assertions never executed; the bytes are now
    // collected inside the verified chain.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 6L), false)
        .flatMap(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue())))
        .assertNext(data -> {
            for (byte b : data) {
                assertEquals(0, b);
            }
        })
        .verifyComplete();
}
@Test
public void uploadAndClearRangeWithArgs() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 1)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
}).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1, 7L), false))
.assertNext(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue()).flatMap(data -> {
for (byte b : data) {
assertEquals(b, 0);
}
return Mono.empty();
})).verifyComplete();
fullInfoData.clear();
}
@Test
public void clearRangeError() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRange(30)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
}
@Test
public void clearRangeErrorArgs() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 20)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
fullInfoData.clear();
}
/**
 * Clearing a range on a leased file succeeds when the correct lease id is supplied.
 */
@Test
public void clearRangeLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUGFIX: the StepVerifier chain was never terminated (no verifyComplete()), so the
    // publisher was never subscribed and this test previously asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
        .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void clearRangeLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
@Test
public void uploadFileDoesNotExist() {
File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
if (uploadFile.exists()) {
assertTrue(uploadFile.delete());
}
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile.getPath()))
.verifyErrorSatisfies(it -> assertInstanceOf(NoSuchFileException.class, it.getCause()));
uploadFile.delete();
}
@Test
public void uploadAndDownloadFileExists() throws IOException {
String data = "Download file exists";
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
if (!downloadFile.exists()) {
assertTrue(downloadFile.createNewFile());
}
primaryFileAsyncClient.create(data.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
data.length()).block();
StepVerifier.create(
primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).verifyErrorSatisfies(it ->
assertInstanceOf(FileAlreadyExistsException.class, it.getCause()));
downloadFile.delete();
}
@Test
public void uploadAndDownloadToFileDoesNotExist() throws FileNotFoundException {
String data = "Download file does not exist";
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
if (downloadFile.exists()) {
assertTrue(downloadFile.delete());
}
primaryFileAsyncClient.create(data.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
data.length()).block();
StepVerifier.create(primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).assertNext(it ->
assertEquals(it.getContentLength(), data.length())).verifyComplete();
Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z");
assertEquals(data, scanner.next());
scanner.close();
downloadFile.delete();
}
@Test
public void uploadFromFileLease() throws IOException {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(
uploadFile, new ShareRequestConditions().setLeaseId(leaseId))).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void uploadFromFileLeaseFail() throws IOException {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void downloadToFileLease() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
downloadFile.delete();
}
@Test
public void downloadToFileLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
downloadFile.delete();
}
@Disabled("Groovy version of this test was not asserting contents of result properly. Need to revisit this test.")
@Test
public void uploadRangeFromURL() {
primaryFileAsyncClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
long sourceOffset = 5;
int length = 5;
long destinationOffset = 0;
primaryFileAsyncClient.upload(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(primaryFileAsyncClient.getShareName())
.setFilePath(primaryFileAsyncClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
.endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
client.create(1024).block();
client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileAsyncClient.getFileUrl()
+ "?" + sasToken).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(client.download())).assertNext(it -> {
String result = new String(it);
for (int i = 0; i < length; i++) {
assertEquals(result.charAt((int) (destinationOffset + i)), data.charAt((int) (sourceOffset + i)));
}
}).verifyComplete();
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
/**
 * uploadRangeFromUrl succeeds when the correct lease id is supplied on the destination file.
 * BUGFIX: this method carried a duplicated {@code @Test} annotation, which is a compile
 * error with JUnit 5 (non-repeatable annotation); one copy removed.
 */
@Test
public void uploadRangeFromURLLease() {
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    // Read-only SAS on the source file so the service can authorize the copy-from-URL read.
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    String leaseId = createLeaseClient(client).acquireLease().block();
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1).verifyComplete();
}
/**
 * uploadRangeFromUrl fails with ShareStorageException when the wrong lease id is supplied
 * on the destination file.
 */
@Test
public void uploadRangeFromURLLeaseFail() {
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    // Read-only SAS on the source file so the service can authorize the copy-from-URL read.
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    createLeaseClient(client).acquireLease().block();
    // Cleanup: dropped a redundant .toString() on getFileUrl(), which already returns String.
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken,
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
@Test
public void startCopy() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
new ShareFileCopyOptions(), getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId()))
.expectComplete().verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, smbProperties,
setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null,
getPollingDuration(1000), null);
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
@Test
public void startCopyError() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
new ShareFileCopyOptions(), getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
@Test
public void startCopyLease() {
    primaryFileAsyncClient.create(1024).block();
    String source = primaryFileAsyncClient.getFileUrl();
    // Hold a lease on the destination and present its id with the copy request.
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions lease = new ShareRequestConditions().setLeaseId(leaseId);

    StepVerifier.create(primaryFileAsyncClient.beginCopy(source, null, null, null, false, false, null,
            getPollingDuration(1000), lease))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
@Test
public void startCopyLeaseFail() {
    // The destination is leased, but the request presents a random (mismatched) lease id.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
        false, false, null, getPollingDuration(1000), new ShareRequestConditions()
        .setLeaseId(testResourceNamer.randomUuid()));
    // FIX: the old assertion expected a successful copy, but a wrong destination lease id must be
    // rejected — consistent with the other "*LeaseFail" tests and startCopyWithOptionsInvalidLease.
    StepVerifier.create(poller).verifyError(ShareStorageException.class);
}
// Same matrix as startCopyWithArgs, but exercises the ShareFileCopyOptions overload.
// NOTE(review): the @MethodSource value below appears truncated in this copy — confirm against VCS.
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(setFilePermission ? FILE_PERMISSION : null)
.setIgnoreReadOnly(ignoreReadOnly)
.setArchiveAttribute(setArchiveAttribute)
.setPermissionCopyModeType(permissionType);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
    // Copying with ignoreReadOnly and archiveAttribute enabled should still succeed.
    primaryFileAsyncClient.create(1024).block();
    String source = primaryFileAsyncClient.getFileUrl();

    ShareFileCopyOptions copyOptions =
        new ShareFileCopyOptions().setIgnoreReadOnly(true).setArchiveAttribute(true);

    StepVerifier.create(primaryFileAsyncClient.beginCopy(source, copyOptions, getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
// Copies with an explicit file permission + NTFS attributes and verifies they round-trip
// onto the destination's SMB properties.
// NOTE(review): the @DisabledIf value below appears truncated in this copy — confirm against VCS.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsFilePermission() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
// NOTE(review): compareDatesWithPrecision's result is ignored — presumably it asserts internally; confirm.
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsChangeTime() {
    // FIX: the create(...) result was captured in an unused local (ShareFileInfo client); dropped.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    // Request a specific change time so the round-trip can be verified below.
    smbProperties.setFileChangeTime(testResourceNamer.now());
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
        getPollingDuration(1000));
    StepVerifier.create(poller).assertNext(it ->
        assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
    // NOTE(review): helper's return value is ignored — presumably it asserts internally; confirm.
    FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
        Objects.requireNonNull(primaryFileAsyncClient.getProperties().block()).getSmbProperties()
            .getFileChangeTime());
}
// Like startCopyWithOptionsFilePermission, but supplies the permission via a server-side key.
// NOTE(review): the @DisabledIf value below appears truncated in this copy — confirm against VCS.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs)
.setFilePermissionKey(filePermissionKey);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
// NOTE(review): compareDatesWithPrecision's result is ignored — presumably it asserts internally; confirm.
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@Test
public void startCopyWithOptionsLease() {
    primaryFileAsyncClient.create(1024).block();
    String source = primaryFileAsyncClient.getFileUrl();
    // Presenting the active destination lease id allows the copy to proceed.
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setDestinationRequestConditions(new ShareRequestConditions().setLeaseId(leaseId));

    StepVerifier.create(primaryFileAsyncClient.beginCopy(source, copyOptions, getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsInvalidLease() {
    primaryFileAsyncClient.create(1024).block();
    String source = primaryFileAsyncClient.getFileUrl();
    // A bogus destination lease id must cause the copy to fail.
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setDestinationRequestConditions(new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid()));

    assertThrows(ShareStorageException.class,
        () -> primaryFileAsyncClient.beginCopy(source, copyOptions, getPollingDuration(1000)).blockFirst());
}
@Test
public void startCopyWithOptionsMetadata() {
    primaryFileAsyncClient.create(1024).block();
    String source = primaryFileAsyncClient.getFileUrl();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions().setMetadata(testMetadata);

    // The copy carrying destination metadata should start and complete normally.
    StepVerifier.create(primaryFileAsyncClient.beginCopy(source, copyOptions, getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
// Requests that the destination keep the source's original SMB properties (created/written/changed
// times and attributes) via CopyableFileSmbPropertiesList, then verifies them after the copy.
// NOTE(review): the @DisabledIf value below appears truncated in this copy — confirm against VCS.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
primaryFileAsyncClient.create(1024).block();
// Snapshot the source's SMB properties before the copy so they can be compared afterwards.
ShareFileProperties initialProperties = primaryFileAsyncClient.getProperties().block();
assertNotNull(initialProperties);
OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(true)
.setLastWrittenOn(true)
.setChangedOn(true)
.setFileAttributes(true);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions)
.setSmbPropertiesToCopy(list);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
// NOTE(review): compareDatesWithPrecision's result is ignored — presumably it asserts internally; confirm.
FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getFileLastWriteTime());
FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getFileChangeTime());
assertEquals(fileAttributes, resultProperties.getNtfsFileAttributes());
}
// Asking to both copy a source SMB property AND override it with an explicit value is
// contradictory, so beginCopy must reject the options with IllegalArgumentException.
// NOTE(review): the @MethodSource value below appears truncated in this copy — confirm against VCS.
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
boolean fileAttributes) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(createdOn)
.setLastWrittenOn(lastWrittenOn)
.setChangedOn(changedOn)
.setFileAttributes(fileAttributes);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFileChangeTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
.setSmbPropertiesToCopy(list);
assertThrows(IllegalArgumentException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)));
}
// Intentionally empty: aborting requires a copy that is still pending, which cannot yet be
// simulated against the live service (see the @Disabled reason).
@Disabled("TODO: Need to find a way of mocking pending copy status")
@Test
public void abortCopy() {
}
@Test
public void deleteFile() {
    // Arrange: the file must exist before it can be deleted.
    primaryFileAsyncClient.create(1024).block();

    // Act + assert: deletion is accepted with HTTP 202.
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse())
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileError() {
    // Deleting a file that was never created surfaces 404 / ResourceNotFound.
    StepVerifier.create(primaryFileAsyncClient.delete())
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404,
            ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void deleteFileLease() {
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions lease = new ShareRequestConditions().setLeaseId(leaseId);

    // Presenting the active lease id allows the delete to proceed (202 Accepted).
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(lease))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileLeaseFail() {
    primaryFileAsyncClient.create(1024).block();
    // Lease the file, then deliberately present a different, random lease id.
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());

    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void deleteIfExistsFile() {
    primaryFileAsyncClient.create(1024).block();

    // deleteIfExists on an existing file behaves like a plain delete: 202 Accepted.
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(null))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileThatDoesNotExist() {
    // deleteIfExists on a file that never existed returns 404 and a false value without throwing.
    ShareFileAsyncClient client = primaryFileAsyncClient.getFileAsyncClient(generateShareName());
    Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    // FIX: JUnit's assertEquals takes (expected, actual); the arguments were reversed here,
    // which produced a misleading failure message ("expected <actual> but was <404>").
    assertEquals(404, response.getStatusCode());
    assertNotEquals(Boolean.TRUE, client.exists().block());
}
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null, null, null).block();

    // First delete reports true; a second attempt on the now-missing file must not report true.
    assertEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
    assertNotEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
}
@Test
public void deleteIfExistsFileLease() {
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions lease = new ShareRequestConditions().setLeaseId(leaseId);

    // The correct lease id lets deleteIfExists succeed with 202.
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(lease))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteIfExistsFileLeaseFail() {
    primaryFileAsyncClient.create(1024).block();
    // Lease the file, then present a different, random lease id.
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());

    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void getProperties() {
    primaryFileAsyncClient.create(1024).block();
    // A freshly created file must expose an ETag, a last-modified stamp, and a complete set
    // of server-populated SMB properties.
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 200);
        assertNotNull(it.getValue().getETag());
        // FIX: getLastModified() was asserted twice in a row; the duplicate line is removed.
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
@Test
public void getPropertiesLease() {
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions lease = new ShareRequestConditions().setLeaseId(leaseId);

    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(lease))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void getPropertiesLeaseFail() {
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());

    // Reading properties with a mismatched lease id must fail.
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void getPropertiesError() {
    // No create() call first, so the properties read must fail with a storage exception.
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .verifyErrorSatisfies(error -> assertInstanceOf(ShareStorageException.class, error));
}
@Test
public void setHttpHeadersFpk() {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    // Reference a server-side permission via its key rather than an inline permission string.
    String permissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(permissionKey);

    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 200);
            FileSmbProperties smb = response.getValue().getSmbProperties();
            assertNotNull(smb);
            assertNotNull(smb.getFilePermissionKey());
            assertNotNull(smb.getNtfsFileAttributes());
            assertNotNull(smb.getFileLastWriteTime());
            assertNotNull(smb.getFileCreationTime());
            assertNotNull(smb.getFileChangeTime());
            assertNotNull(smb.getParentId());
            assertNotNull(smb.getFileId());
        }).verifyComplete();
}
@Test
public void setHttpHeadersFp() {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());

    // Here the permission is passed inline instead of via a permission key.
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
            FILE_PERMISSION))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 200);
            FileSmbProperties smb = response.getValue().getSmbProperties();
            assertNotNull(smb);
            assertNotNull(smb.getFilePermissionKey());
            assertNotNull(smb.getNtfsFileAttributes());
            assertNotNull(smb.getFileLastWriteTime());
            assertNotNull(smb.getFileCreationTime());
            assertNotNull(smb.getFileChangeTime());
            assertNotNull(smb.getParentId());
            assertNotNull(smb.getFileId());
        }).verifyComplete();
}
@Test
public void setHttpHeadersLease() {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions lease = new ShareRequestConditions().setLeaseId(leaseId);

    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null, lease))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void setHttpHeadersLeaseFail() {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());

    // Setting properties with a mismatched lease id must fail.
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null, wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void setHttpHeadersError() {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();

    // A negative size is rejected with 400 / OutOfRangeInput.
    StepVerifier.create(primaryFileAsyncClient.setProperties(-1, null, null, null))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void setMetadata() {
    primaryFileAsyncClient.createWithResponse(1024, httpHeaders, null, null, testMetadata).block();
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");

    // Metadata supplied at creation is visible on the first read.
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .assertNext(properties -> assertEquals(testMetadata, properties.getMetadata()))
        .verifyComplete();

    // Replacing it succeeds with HTTP 200...
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(updatedMetadata))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 200))
        .verifyComplete();

    // ...and the new value is observable on the next read.
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .assertNext(properties -> assertEquals(updatedMetadata, properties.getMetadata()))
        .verifyComplete();
}
@Test
public void setMetadataError() {
    primaryFileAsyncClient.create(1024).block();
    // An empty metadata key is illegal and rejected with 400 / EmptyMetadataKey.
    Map<String, String> badMetadata = Collections.singletonMap("", "value");

    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(badMetadata))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void setMetadataLease() {
    primaryFileAsyncClient.create(1024).block();
    Map<String, String> metadata = Collections.singletonMap("key", "value");
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions lease = new ShareRequestConditions().setLeaseId(leaseId);

    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata, lease))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void setMetadataLeaseFail() {
    primaryFileAsyncClient.create(1024).block();
    Map<String, String> metadata = Collections.singletonMap("key", "value");
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());

    // Setting metadata with a mismatched lease id must fail.
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata, wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void listRanges() throws IOException {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();

    // The whole 1 KiB file was written, so exactly one range [0, 1023] is expected.
    StepVerifier.create(primaryFileAsyncClient.listRanges())
        .assertNext(range -> {
            assertEquals(0, range.getStart());
            assertEquals(1023, range.getEnd());
        })
        .verifyComplete();

    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesWithRange() throws IOException {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();

    // Restricting the query to [0, 511] clips the reported range accordingly.
    StepVerifier.create(primaryFileAsyncClient.listRanges(new ShareFileRange(0, 511L)))
        .assertNext(range -> {
            assertEquals(0, range.getStart());
            assertEquals(511, range.getEnd());
        })
        .verifyComplete();

    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesLease() throws IOException {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions lease = new ShareRequestConditions().setLeaseId(leaseId);

    StepVerifier.create(primaryFileAsyncClient.listRanges(null, lease))
        .expectNextCount(1)
        .verifyComplete();

    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesLeaseFail() throws IOException {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());

    // Listing ranges with a mismatched lease id must fail.
    StepVerifier.create(primaryFileAsyncClient.listRanges(null, wrongLease))
        .verifyError(ShareStorageException.class);

    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Verifies listRangesDiff against a share snapshot: updates and clears applied after the
// snapshot must be reported exactly as the parameterized expectations describe.
// NOTE(review): the @DisabledIf/@MethodSource values below appear truncated in this copy — confirm against VCS.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
// Create a 4 MB file, fill it, and snapshot the share as the diff baseline.
String snapshotId = primaryFileAsyncClient.create(4 * Constants.MB)
.then(primaryFileAsyncClient.upload(Flux.just(FileShareTestHelper.getRandomByteBuffer(4 * Constants.MB)),
4 * Constants.MB))
.then(primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName())
.createSnapshot()
.map(ShareSnapshotInfo::getSnapshot))
.block();
// Apply the post-snapshot updates...
Flux.fromIterable(rangesToUpdate)
.flatMap(it -> {
int size = (int) (it.getEnd() - it.getStart() + 1);
return primaryFileAsyncClient.uploadWithResponse(Flux.just(
FileShareTestHelper.getRandomByteBuffer(size)), size, it.getStart());
}).blockLast();
// ...and the post-snapshot clears.
Flux.fromIterable(rangesToClear)
.flatMap(it -> primaryFileAsyncClient.clearRangeWithResponse(it.getEnd() - it.getStart() + 1,
it.getStart()))
.blockLast();
// The diff must match the expected updated and cleared ranges element-by-element, in order.
StepVerifier.create(primaryFileAsyncClient.listRangesDiff(snapshotId)).assertNext(it -> {
assertEquals(it.getRanges().size(), expectedRanges.size());
assertEquals(it.getClearRanges().size(), expectedClearRanges.size());
for (int i = 0; i < expectedRanges.size(); i++) {
FileRange actualRange = it.getRanges().get(i);
FileRange expectedRange = expectedRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
for (int i = 0; i < expectedClearRanges.size(); i++) {
ClearRange actualRange = it.getClearRanges().get(i);
ClearRange expectedRange = expectedClearRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
}).verifyComplete();
}
@Test
public void listHandles() {
    // A freshly created file has no open handles, so the listing completes empty.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.listHandles()).verifyComplete();
}
@Test
public void listHandlesWithMaxResult() {
    // Same as listHandles, but with an explicit page-size cap of 2.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.listHandles(2)).verifyComplete();
}
// Force-closing a handle id that isn't open is a no-op: zero closed, zero failed.
// NOTE(review): the @DisabledIf value below appears truncated in this copy — confirm against VCS.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseHandleMin() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("1")).assertNext(it -> {
assertEquals(it.getClosedHandles(), 0);
assertEquals(it.getFailedHandles(), 0);
}).verifyComplete();
}
@Test
public void forceCloseHandleInvalidHandleID() {
    primaryFileAsyncClient.create(512).block();
    // A malformed handle id is rejected by the service with a storage exception.
    StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("invalidHandleId"))
        .verifyErrorSatisfies(error -> assertInstanceOf(ShareStorageException.class, error));
}
// Force-closing all handles on a file with none open reports zero closed and zero failed.
// NOTE(review): the @DisabledIf value below appears truncated in this copy — confirm against VCS.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseAllHandlesMin() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseAllHandles())
.assertNext(it -> {
assertEquals(it.getClosedHandles(), 0);
assertEquals(it.getFailedHandles(), 0);
}).verifyComplete();
}
@Test
public void getSnapshotId() {
    // A client built with an explicit snapshot timestamp must echo it back verbatim.
    String snapshotTimestamp =
        OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();

    ShareFileAsyncClient snapshotClient =
        fileBuilderHelper(shareName, filePath).snapshot(snapshotTimestamp).buildFileAsyncClient();

    assertEquals(snapshotTimestamp, snapshotClient.getShareSnapshotId());
}
@Test
public void getShareName() {
    // The client must report the share it was built for.
    assertEquals(shareName, primaryFileAsyncClient.getShareName());
}
@Test
public void getFilePath() {
    // The client must report the file path it was built for.
    assertEquals(filePath, primaryFileAsyncClient.getFilePath());
}
} | class FileAsyncApiTests extends FileShareTestBase {
// Per-test clients and naming state, re-initialized in setup().
private ShareFileAsyncClient primaryFileAsyncClient;
private ShareClient shareClient;
private String shareName;
private String filePath;
// NOTE(review): these two are static yet reassigned in the per-test setup() — instance fields
// would be safer if tests ever run in parallel; confirm before changing.
private static Map<String, String> testMetadata;
private static ShareFileHttpHeaders httpHeaders;
private FileSmbProperties smbProperties;
// SDDL permission string used wherever a test needs a concrete file permission.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
    // Fresh share + file names per test; the share is created eagerly, the file lazily.
    shareName = generateShareName();
    filePath = generatePathName();
    shareClient = shareBuilderHelper(shareName).buildClient();
    shareClient.create();
    primaryFileAsyncClient = fileBuilderHelper(shareName, filePath).buildFileAsyncClient();

    testMetadata = Collections.singletonMap("testmetadata", "value");
    httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en")
        .setContentType("application/octet-stream");
    smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.NORMAL));
}
// The client's URL must match the one derived from the connection string's account name.
@Test
public void getFileURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
// NOTE(review): the format string below appears truncated in this copy — confirm against VCS.
String expectURL = String.format("https:
String fileURL = primaryFileAsyncClient.getFileUrl();
assertEquals(expectURL, fileURL);
}
@Test
public void createFile() {
    // Creating a 1 KiB file with no extra arguments should answer 201 Created.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, null, null, null, null))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 201))
        .verifyComplete();
}
@Test
public void createFileError() {
    // A negative size is rejected with 400 / OutOfRangeInput.
    StepVerifier.create(primaryFileAsyncClient.create(-1))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void createFileWithArgsFpk() {
    // Create with a server-side permission key plus SMB timestamps, headers and metadata.
    String permissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(permissionKey);

    StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, null,
            testMetadata))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 201);
            assertNotNull(response.getValue().getLastModified());
            FileSmbProperties smb = response.getValue().getSmbProperties();
            assertNotNull(smb);
            assertNotNull(smb.getFilePermissionKey());
            assertNotNull(smb.getNtfsFileAttributes());
            assertNotNull(smb.getFileLastWriteTime());
            assertNotNull(smb.getFileCreationTime());
            assertNotNull(smb.getFileChangeTime());
            assertNotNull(smb.getParentId());
            assertNotNull(smb.getFileId());
        }).verifyComplete();
}
@Test
public void createFileWithArgsFp() {
    // Create with an inline permission string instead of a permission key.
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());

    StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties,
            FILE_PERMISSION, testMetadata))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 201);
            assertNotNull(response.getValue().getLastModified());
            FileSmbProperties smb = response.getValue().getSmbProperties();
            assertNotNull(smb);
            assertNotNull(smb.getFilePermissionKey());
            assertNotNull(smb.getNtfsFileAttributes());
            assertNotNull(smb.getFileLastWriteTime());
            assertNotNull(smb.getFileCreationTime());
            assertNotNull(smb.getFileChangeTime());
            assertNotNull(smb.getParentId());
            assertNotNull(smb.getFileId());
        }).verifyComplete();
}
@Test
public void createFileWithArgsError() {
    // A negative size is rejected even when metadata is supplied.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(-1, null, null, null, testMetadata))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void createLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // FIX: the StepVerifier was never subscribed — without a terminal verify* call,
    // expectNextCount(1) only builds the scenario and runs nothing, so the test asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
        null, null, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void createLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
null, null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
    /*
     * Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
     */
/*
* Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
*/
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@ValueSource(ints = {
0,
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
})
public void downloadFileBufferCopy(int fileSize) throws IOException {
ShareServiceAsyncClient shareServiceAsyncClient = new ShareServiceClientBuilder()
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.buildAsyncClient();
ShareFileAsyncClient fileClient = shareServiceAsyncClient.getShareAsyncClient(shareName)
.createFile(filePath, fileSize).block();
File file = FileShareTestHelper.getRandomFile(fileSize);
assertNotNull(fileClient);
fileClient.uploadFromFile(file.toPath().toString()).block();
File outFile = new File(generatePathName() + ".txt");
if (outFile.exists()) {
assertTrue(outFile.delete());
}
fileClient.downloadToFile(outFile.toPath().toString()).block();
assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
shareServiceAsyncClient.deleteShare(shareName).block();
outFile.delete();
file.delete();
}
@Test
public void uploadAndDownloadData() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null)).assertNext(response -> {
assertTrue((response.getStatusCode() == 200) || (response.getStatusCode() == 206));
ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
assertEquals(DATA.getDefaultDataSizeLong(), headers.getContentLength());
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
FluxUtil.collectBytesInByteBufferStream(response.getValue())
.flatMap(actualData -> {
assertArrayEquals(DATA.getDefaultBytes(), actualData);
return Mono.empty();
});
}).verifyComplete();
}
@Test
public void uploadAndDownloadDataWithArgs() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()).setOffset(1L)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1,
DATA.getDefaultDataSizeLong()), true)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 206);
assertEquals(DATA.getDefaultDataSizeLong(), it.getDeserializedHeaders().getContentLength());
FluxUtil.collectBytesInByteBufferStream(it.getValue()).flatMap(actualData -> {
assertArrayEquals(DATA.getDefaultBytes(), actualData);
return Mono.empty();
});
}).verifyComplete();
}
@Test
public void uploadDataError() {
StepVerifier.create(primaryFileAsyncClient.upload(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void uploadLease() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
.setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))).expectNextCount(1);
}
@Test
public void uploadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
.setRequestConditions(new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid()))))
.verifyError(ShareStorageException.class);
}
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(long size, String errMsg) {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), size).setOffset(0L))).verifyErrorSatisfies(it -> {
assertInstanceOf(UnexpectedLengthException.class, it);
assertTrue(it.getMessage().contains(errMsg));
});
}
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
return Stream.of(
Arguments.of(6, "more than"),
Arguments.of(8, "less than"));
}
@Test
public void downloadDataError() {
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 1023L), false))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void downloadLease() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
.setLeaseId(leaseId))).expectNextCount(1);
}
@Test
public void downloadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
@Test
public void uploadAndClearRange() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 0)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 6L), false))
.assertNext(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue()).flatMap(data -> {
for (byte b : data) {
assertEquals(b, 0);
}
return Mono.empty();
})).verifyComplete();
}
@Test
public void uploadAndClearRangeWithArgs() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 1)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
}).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1, 7L), false))
.assertNext(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue()).flatMap(data -> {
for (byte b : data) {
assertEquals(b, 0);
}
return Mono.empty();
})).verifyComplete();
fullInfoData.clear();
}
@Test
public void clearRangeError() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRange(30)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
}
@Test
public void clearRangeErrorArgs() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 20)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
fullInfoData.clear();
}
@Test
public void clearRangeLease() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
.setLeaseId(leaseId))).expectNextCount(1);
}
@Test
public void clearRangeLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
@Test
public void uploadFileDoesNotExist() {
File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
if (uploadFile.exists()) {
assertTrue(uploadFile.delete());
}
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile.getPath()))
.verifyErrorSatisfies(it -> assertInstanceOf(NoSuchFileException.class, it.getCause()));
uploadFile.delete();
}
@Test
public void uploadAndDownloadFileExists() throws IOException {
String data = "Download file exists";
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
if (!downloadFile.exists()) {
assertTrue(downloadFile.createNewFile());
}
primaryFileAsyncClient.create(data.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
data.length()).block();
StepVerifier.create(
primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).verifyErrorSatisfies(it ->
assertInstanceOf(FileAlreadyExistsException.class, it.getCause()));
downloadFile.delete();
}
@Test
public void uploadAndDownloadToFileDoesNotExist() throws FileNotFoundException {
String data = "Download file does not exist";
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
if (downloadFile.exists()) {
assertTrue(downloadFile.delete());
}
primaryFileAsyncClient.create(data.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
data.length()).block();
StepVerifier.create(primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).assertNext(it ->
assertEquals(it.getContentLength(), data.length())).verifyComplete();
Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z");
assertEquals(data, scanner.next());
scanner.close();
downloadFile.delete();
}
@Test
public void uploadFromFileLease() throws IOException {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(
uploadFile, new ShareRequestConditions().setLeaseId(leaseId))).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void uploadFromFileLeaseFail() throws IOException {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void downloadToFileLease() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
downloadFile.delete();
}
@Test
public void downloadToFileLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
downloadFile.delete();
}
    @Disabled("Groovy version of this test was not asserting contents of result properly. Need to revisit this test.")
    @Test
    public void uploadRangeFromURL() {
        // Copies a sub-range of the source file into a destination file via a SAS-authenticated
        // URL, then verifies the copied characters byte-for-byte. Currently disabled pending the
        // assertion rework noted above.
        primaryFileAsyncClient.create(1024).block();
        String data = "The quick brown fox jumps over the lazy dog";
        long sourceOffset = 5;
        int length = 5;
        long destinationOffset = 0;
        primaryFileAsyncClient.upload(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
        // Build a read-only SAS on the source file so the destination client can read it.
        StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
            ENVIRONMENT.getPrimaryAccount().getConnectionString());
        String sasToken = new ShareServiceSasSignatureValues()
            .setExpiryTime(testResourceNamer.now().plusDays(1))
            .setPermissions(new ShareFileSasPermission().setReadPermission(true))
            .setShareName(primaryFileAsyncClient.getShareName())
            .setFilePath(primaryFileAsyncClient.getFilePath())
            .generateSasQueryParameters(credential)
            .encode();
        ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
            .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
        client.create(1024).block();
        client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileAsyncClient.getFileUrl()
            + "?" + sasToken).block();
        // Compare the copied window of the destination against the source substring.
        StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(client.download())).assertNext(it -> {
            String result = new String(it);
            for (int i = 0; i < length; i++) {
                assertEquals(result.charAt((int) (destinationOffset + i)), data.charAt((int) (sourceOffset + i)));
            }
        }).verifyComplete();
    }
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
@Test
public void uploadRangeFromURLLease() {
primaryFileAsyncClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
long sourceOffset = 5;
int length = 5;
long destinationOffset = 0;
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(primaryFileAsyncClient.getShareName())
.setFilePath(primaryFileAsyncClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
.endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
client.create(1024).block();
String leaseId = createLeaseClient(client).acquireLease().block();
StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
primaryFileAsyncClient.getFileUrl() + "?" + sasToken, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
}
@Test
public void uploadRangeFromURLLeaseFail() {
primaryFileAsyncClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
long sourceOffset = 5;
int length = 5;
long destinationOffset = 0;
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(primaryFileAsyncClient.getShareName())
.setFilePath(primaryFileAsyncClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
.endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
client.create(1024).block();
createLeaseClient(client).acquireLease().block();
StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
primaryFileAsyncClient.getFileUrl().toString() + "?" + sasToken,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
@Test
public void startCopy() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
new ShareFileCopyOptions(), getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId()))
.expectComplete().verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, smbProperties,
setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null,
getPollingDuration(1000), null);
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
    @Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
        + "between the time subscribed and the time we start observing events.")
    @Test
    public void startCopyError() {
        // NOTE(review): despite the "Error" name this test asserts a successful copy; it appears
        // to be a placeholder pending the Poller race fix noted above — confirm intended behavior.
        primaryFileAsyncClient.create(1024).block();
        String sourceURL = primaryFileAsyncClient.getFileUrl();
        PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
            new ShareFileCopyOptions(), getPollingDuration(1000));
        StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
            .verify(Duration.ofMinutes(1));
    }
    @Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
        + "between the time subscribed and the time we start observing events.")
    @Test
    public void startCopyLease() {
        // Begin a copy with the destination's active lease id supplied; the copy should start
        // and report a copy id. Disabled pending the Poller race fix noted above.
        primaryFileAsyncClient.create(1024).block();
        String sourceURL = primaryFileAsyncClient.getFileUrl();
        String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
        PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
            false, false, null, getPollingDuration(1000), new ShareRequestConditions().setLeaseId(leaseId));
        StepVerifier.create(poller).assertNext(it -> {
            assertNotNull(it.getValue().getCopyId());
        }).expectComplete().verify(Duration.ofMinutes(1));
    }
    @Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
        + "between the time subscribed and the time we start observing events.")
    @Test
    public void startCopyLeaseFail() {
        // NOTE(review): despite the "Fail" name this asserts a successful copy with a random
        // (non-matching) lease id; expected a ShareStorageException path — confirm intent when
        // the test is re-enabled.
        primaryFileAsyncClient.create(1024).block();
        String sourceURL = primaryFileAsyncClient.getFileUrl();
        createLeaseClient(primaryFileAsyncClient).acquireLease().block();
        PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
            false, false, null, getPollingDuration(1000), new ShareRequestConditions()
            .setLeaseId(testResourceNamer.randomUuid()));
        StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
            .verify(Duration.ofMinutes(1));
    }
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(setFilePermission ? FILE_PERMISSION : null)
.setIgnoreReadOnly(ignoreReadOnly)
.setArchiveAttribute(setArchiveAttribute)
.setPermissionCopyModeType(permissionType);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setIgnoreReadOnly(true)
.setArchiveAttribute(true);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsFilePermission() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsChangeTime() {
ShareFileInfo client = primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
smbProperties.setFileChangeTime(testResourceNamer.now());
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
Objects.requireNonNull(primaryFileAsyncClient.getProperties().block()).getSmbProperties()
.getFileChangeTime());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs)
.setFilePermissionKey(filePermissionKey);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@Test
public void startCopyWithOptionsLease() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsInvalidLease() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = testResourceNamer.randomUuid();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions);
assertThrows(ShareStorageException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)).blockFirst());
}
@Test
public void startCopyWithOptionsMetadata() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setMetadata(testMetadata);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
primaryFileAsyncClient.create(1024).block();
ShareFileProperties initialProperties = primaryFileAsyncClient.getProperties().block();
assertNotNull(initialProperties);
OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(true)
.setLastWrittenOn(true)
.setChangedOn(true)
.setFileAttributes(true);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions)
.setSmbPropertiesToCopy(list);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getFileLastWriteTime());
FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getFileChangeTime());
assertEquals(fileAttributes, resultProperties.getNtfsFileAttributes());
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
boolean fileAttributes) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(createdOn)
.setLastWrittenOn(lastWrittenOn)
.setChangedOn(changedOn)
.setFileAttributes(fileAttributes);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFileChangeTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
.setSmbPropertiesToCopy(list);
assertThrows(IllegalArgumentException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)));
}
// Intentionally empty: abort-copy requires observing a copy in PENDING state,
// which cannot currently be produced deterministically in tests.
@Disabled("TODO: Need to find a way of mocking pending copy status")
@Test
public void abortCopy() {
}
@Test
public void deleteFile() {
    // Deleting an existing file must complete with 202 Accepted.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse())
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileError() {
    // Deleting a file that was never created must fail with 404 / ResourceNotFound.
    StepVerifier.create(primaryFileAsyncClient.delete())
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404,
                ShareErrorCode.RESOURCE_NOT_FOUND));
}
// Delete with the currently-held lease id succeeds (202).
@Test
public void deleteFileLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(new ShareRequestConditions().setLeaseId(leaseId)))
    .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Delete with a random (wrong) lease id is rejected by the service.
@Test
public void deleteFileLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(new ShareRequestConditions()
    .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// deleteIfExists on an existing file behaves like delete: 202 Accepted.
@Test
public void deleteIfExistsFile() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(null)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteFileThatDoesNotExist() {
    // deleteIfExists against a never-created file reports "not deleted" (value=false, 404)
    // and the file still does not exist afterwards.
    ShareFileAsyncClient client = primaryFileAsyncClient.getFileAsyncClient(generateShareName());
    Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    // FIX: JUnit's assertEquals signature is (expected, actual); the original passed
    // them swapped, which produces misleading failure messages.
    assertEquals(404, response.getStatusCode());
    assertNotEquals(Boolean.TRUE, client.exists().block());
}
// First deleteIfExists returns true (deleted); the second returns false (already gone).
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null, null, null).block();
assertEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
assertNotEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
}
// deleteIfExists honors a matching lease id (202).
@Test
public void deleteIfExistsFileLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(new ShareRequestConditions()
    .setLeaseId(leaseId))).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
    .verifyComplete();
}
// deleteIfExists with a wrong lease id is rejected by the service.
@Test
public void deleteIfExistsFileLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(new ShareRequestConditions()
    .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
@Test
public void getProperties() {
    // Fetching properties of an existing file returns 200 with populated ETag,
    // last-modified and all SMB property fields.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 200);
        assertNotNull(it.getValue().getETag());
        // FIX: the original asserted getLastModified() twice in a row; the duplicate
        // line added no coverage and has been removed.
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
// getProperties succeeds when the correct lease id is supplied.
@Test
public void getPropertiesLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(
    new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// getProperties with a wrong lease id fails with ShareStorageException.
@Test
public void getPropertiesLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(
    new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
    .verifyError(ShareStorageException.class);
}
// getProperties on a never-created file surfaces a ShareStorageException.
@Test
public void getPropertiesError() {
StepVerifier.create(primaryFileAsyncClient.getProperties())
    .verifyErrorSatisfies(it -> assertInstanceOf(ShareStorageException.class, it));
}
// setProperties using a pre-created file-permission KEY; all SMB fields must come back populated.
@Test
public void setHttpHeadersFpk() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now())
    .setFilePermissionKey(filePermissionKey);
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null))
    .assertNext(it -> {
    FileShareTestHelper.assertResponseStatusCode(it, 200);
    assertNotNull(it.getValue().getSmbProperties());
    assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(it.getValue().getSmbProperties().getParentId());
    assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
// setProperties passing the raw permission string (service creates the key).
@Test
public void setHttpHeadersFp() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
    FILE_PERMISSION)).assertNext(it -> {
    FileShareTestHelper.assertResponseStatusCode(it, 200);
    assertNotNull(it.getValue().getSmbProperties());
    assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(it.getValue().getSmbProperties().getParentId());
    assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
// setProperties succeeds with the matching lease id.
@Test
public void setHttpHeadersLease() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null,
    new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// setProperties with a wrong lease id fails.
@Test
public void setHttpHeadersLeaseFail() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null,
    new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
    .verifyError(ShareStorageException.class);
}
// A negative size is rejected with 400 / OutOfRangeInput.
@Test
public void setHttpHeadersError() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
StepVerifier.create(primaryFileAsyncClient.setProperties(-1, null, null, null)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// setMetadata replaces the create-time metadata; verified by reading properties before and after.
@Test
public void setMetadata() {
primaryFileAsyncClient.createWithResponse(1024, httpHeaders, null, null, testMetadata).block();
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
StepVerifier.create(primaryFileAsyncClient.getProperties()).assertNext(it ->
assertEquals(testMetadata, it.getMetadata())).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(updatedMetadata))
    .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.getProperties()).assertNext(it ->
assertEquals(updatedMetadata, it.getMetadata())).verifyComplete();
}
// An empty metadata key is rejected with 400 / EmptyMetadataKey.
@Test
public void setMetadataError() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> errorMetadata = Collections.singletonMap("", "value");
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(errorMetadata))
    .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
    ShareErrorCode.EMPTY_METADATA_KEY));
}
// setMetadata succeeds with the matching lease id.
@Test
public void setMetadataLease() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> metadata = Collections.singletonMap("key", "value");
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata,
    new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// setMetadata with a wrong lease id fails.
@Test
public void setMetadataLeaseFail() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> metadata = Collections.singletonMap("key", "value");
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata,
    new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
    .verifyError(ShareStorageException.class);
}
// A full 1024-byte upload produces a single range [0, 1023].
@Test
public void listRanges() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
StepVerifier.create(primaryFileAsyncClient.listRanges()).assertNext(it -> {
    assertEquals(0, it.getStart());
    assertEquals(1023, it.getEnd());
}).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Restricting the query to [0, 511] clips the returned range accordingly.
@Test
public void listRangesWithRange() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
StepVerifier.create(primaryFileAsyncClient.listRanges(new ShareFileRange(0, 511L)))
    .assertNext(it -> {
    assertEquals(0, it.getStart());
    assertEquals(511, it.getEnd());
    }).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// listRanges succeeds with the matching lease id.
@Test
public void listRangesLease() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.listRanges(null, new ShareRequestConditions().setLeaseId(leaseId)))
    .expectNextCount(1).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// listRanges with a wrong lease id fails.
@Test
public void listRangesLeaseFail() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.listRanges(null, new ShareRequestConditions()
    .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Diff of ranges against a share snapshot: apply updates/clears after the snapshot and
// verify the reported updated and cleared ranges match the parameterized expectations.
// NOTE(review): the @DisabledIf and @MethodSource values below are truncated in this
// source (unterminated string literals) — restore the full condition/method references.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
    List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
// Baseline: 4 MB of data captured in a share snapshot.
String snapshotId = primaryFileAsyncClient.create(4 * Constants.MB)
    .then(primaryFileAsyncClient.upload(Flux.just(FileShareTestHelper.getRandomByteBuffer(4 * Constants.MB)),
    4 * Constants.MB))
    .then(primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName())
    .createSnapshot()
    .map(ShareSnapshotInfo::getSnapshot))
    .block();
// Mutate the live file after the snapshot: overwrite some ranges...
Flux.fromIterable(rangesToUpdate)
    .flatMap(it -> {
    int size = (int) (it.getEnd() - it.getStart() + 1);
    return primaryFileAsyncClient.uploadWithResponse(Flux.just(
    FileShareTestHelper.getRandomByteBuffer(size)), size, it.getStart());
    }).blockLast();
// ...and clear others.
Flux.fromIterable(rangesToClear)
    .flatMap(it -> primaryFileAsyncClient.clearRangeWithResponse(it.getEnd() - it.getStart() + 1,
    it.getStart()))
    .blockLast();
StepVerifier.create(primaryFileAsyncClient.listRangesDiff(snapshotId)).assertNext(it -> {
    assertEquals(it.getRanges().size(), expectedRanges.size());
    assertEquals(it.getClearRanges().size(), expectedClearRanges.size());
    for (int i = 0; i < expectedRanges.size(); i++) {
    FileRange actualRange = it.getRanges().get(i);
    FileRange expectedRange = expectedRanges.get(i);
    assertEquals(expectedRange.getStart(), actualRange.getStart());
    assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
    for (int i = 0; i < expectedClearRanges.size(); i++) {
    ClearRange actualRange = it.getClearRanges().get(i);
    ClearRange expectedRange = expectedClearRanges.get(i);
    assertEquals(expectedRange.getStart(), actualRange.getStart());
    assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
}).verifyComplete();
}
// A freshly-created file has no open handles: the listing completes empty.
@Test
public void listHandles() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.listHandles()).verifyComplete();
}
// Same as above but with an explicit page-size hint.
@Test
public void listHandlesWithMaxResult() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.listHandles(2)).verifyComplete();
}
// NOTE(review): the @DisabledIf condition string is truncated in this source — restore it.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseHandleMin() {
    // Force-closing a handle id that is not open is a no-op: zero closed, zero failed.
    primaryFileAsyncClient.create(512).block();
    StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("1")).assertNext(it -> {
        // FIX: assertEquals takes (expected, actual); the original passed them reversed.
        assertEquals(0, it.getClosedHandles());
        assertEquals(0, it.getFailedHandles());
    }).verifyComplete();
}
// A syntactically invalid handle id is rejected by the service with ShareStorageException.
@Test
public void forceCloseHandleInvalidHandleID() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("invalidHandleId"))
    .verifyErrorSatisfies(it -> assertInstanceOf(ShareStorageException.class, it));
}
// NOTE(review): the @DisabledIf condition string is truncated in this source — restore it.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseAllHandlesMin() {
    // With no open handles, force-close-all reports zero closed and zero failed.
    primaryFileAsyncClient.create(512).block();
    StepVerifier.create(primaryFileAsyncClient.forceCloseAllHandles())
        .assertNext(it -> {
            // FIX: assertEquals takes (expected, actual); the original passed them reversed.
            assertEquals(0, it.getClosedHandles());
            assertEquals(0, it.getFailedHandles());
        }).verifyComplete();
}
// A client built with an explicit snapshot timestamp exposes it via getShareSnapshotId().
@Test
public void getSnapshotId() {
String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
ShareFileAsyncClient shareSnapshotClient = fileBuilderHelper(shareName, filePath).snapshot(snapshot)
    .buildFileAsyncClient();
assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
}
// Accessor round-trip for the share name configured in setup().
@Test
public void getShareName() {
assertEquals(shareName, primaryFileAsyncClient.getShareName());
}
// Accessor round-trip for the file path configured in setup().
@Test
public void getFilePath() {
assertEquals(filePath, primaryFileAsyncClient.getFilePath());
}
// NOTE(review): both annotation condition strings below are truncated in this source —
// restore the full "ClassName#method" references.
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void listHandlesClientName() {
    // Targets a pre-provisioned share/dir/file ("testing"/"dir1"/"test.txt") that is
    // expected to have at least one open handle with a client name.
    ShareAsyncClient client = primaryFileServiceAsyncClient.getShareAsyncClient("testing");
    ShareDirectoryAsyncClient directoryClient = client.getDirectoryClient("dir1");
    ShareFileAsyncClient fileClient = directoryClient.getFileClient("test.txt");
    List<HandleItem> list = fileClient.listHandles().collectList().block();
    // FIX: guard the list before indexing so a missing handle fails with a clear
    // assertion instead of an NPE/IndexOutOfBoundsException.
    assertNotNull(list);
    assertFalse(list.isEmpty());
    assertNotNull(list.get(0).getClientName());
}
} |
I believe they are being populated. I see "x-ms-file-request-intent: backup" in fiddler. I don't see x-ms-source-authorization, but I do see "Authorization: SharedKey", if that is what you're referring to. | public void uploadRangeFromURLOAuth() {
// Body of uploadRangeFromURLOAuth (its signature is fused with dataset-row text on the
// previous line): copies 5 bytes from a source file (offset 5) into a destination file
// (offset 0) via uploadRangeFromUrl + read SAS, under a BACKUP-intent service client.
ShareServiceAsyncClient oAuthServiceClient = getOAuthServiceClientAsyncSharedKey(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryAsyncClient dirClient = oAuthServiceClient.getShareAsyncClient(shareName)
    .getDirectoryClient(generatePathName());
dirClient.create().block();
String fileName = generatePathName();
ShareFileAsyncClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
int sourceOffset = 5;
int length = 5;
int destinationOffset = 0;
fileClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
// Read-only SAS so the destination can pull from the source URL.
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
    ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
    .setExpiryTime(testResourceNamer.now().plusDays(1))
    .setPermissions(new ShareFileSasPermission().setReadPermission(true))
    .setShareName(fileClient.getShareName())
    .setFilePath(fileClient.getFilePath())
    .generateSasQueryParameters(credential)
    .encode();
String fileNameDest = generatePathName();
ShareFileAsyncClient fileClientDest = dirClient.getFileClient(fileNameDest);
fileClientDest.create(1024).block();
StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length,
    destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken))
    // NOTE(review): assertEquals arguments are in (actual, expected) order here.
    .assertNext(r -> assertEquals(r.getStatusCode(), 201))
    .verifyComplete();
StepVerifier.create(fileClientDest.downloadWithResponse(null)
    .flatMap(r -> {
    assertTrue(r.getStatusCode() == 200 || r.getStatusCode() == 206);
    ShareFileDownloadHeaders headers = r.getDeserializedHeaders();
    assertNotNull(headers.getETag());
    assertNotNull(headers.getLastModified());
    assertNotNull(headers.getFilePermissionKey());
    assertNotNull(headers.getFileAttributes());
    assertNotNull(headers.getFileLastWriteTime());
    assertNotNull(headers.getFileCreationTime());
    assertNotNull(headers.getFileChangeTime());
    assertNotNull(headers.getFileParentId());
    assertNotNull(headers.getFileId());
    return FluxUtil.collectBytesInByteBufferStream(r.getValue());
    }))
    .assertNext(bytes -> {
    // 117 == 'u', the byte at source offset 5 of "The quick ...".
    assertEquals(bytes[0], 117);
    })
    .verifyComplete();
} | StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length, | public void uploadRangeFromURLOAuth() {
// NOTE(review): duplicate of the uploadRangeFromURLOAuth body above — this is the
// "method_body_after" column of the same dataset row; kept verbatim.
ShareServiceAsyncClient oAuthServiceClient = getOAuthServiceClientAsyncSharedKey(new ShareServiceClientBuilder()
    .shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryAsyncClient dirClient = oAuthServiceClient.getShareAsyncClient(shareName)
    .getDirectoryClient(generatePathName());
dirClient.create().block();
String fileName = generatePathName();
ShareFileAsyncClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
int sourceOffset = 5;
int length = 5;
int destinationOffset = 0;
fileClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
    ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
    .setExpiryTime(testResourceNamer.now().plusDays(1))
    .setPermissions(new ShareFileSasPermission().setReadPermission(true))
    .setShareName(fileClient.getShareName())
    .setFilePath(fileClient.getFilePath())
    .generateSasQueryParameters(credential)
    .encode();
String fileNameDest = generatePathName();
ShareFileAsyncClient fileClientDest = dirClient.getFileClient(fileNameDest);
fileClientDest.create(1024).block();
StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length,
    destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken))
    .assertNext(r -> assertEquals(r.getStatusCode(), 201))
    .verifyComplete();
StepVerifier.create(fileClientDest.downloadWithResponse(null)
    .flatMap(r -> {
    assertTrue(r.getStatusCode() == 200 || r.getStatusCode() == 206);
    ShareFileDownloadHeaders headers = r.getDeserializedHeaders();
    assertNotNull(headers.getETag());
    assertNotNull(headers.getLastModified());
    assertNotNull(headers.getFilePermissionKey());
    assertNotNull(headers.getFileAttributes());
    assertNotNull(headers.getFileLastWriteTime());
    assertNotNull(headers.getFileCreationTime());
    assertNotNull(headers.getFileChangeTime());
    assertNotNull(headers.getFileParentId());
    assertNotNull(headers.getFileId());
    return FluxUtil.collectBytesInByteBufferStream(r.getValue());
    }))
    .assertNext(bytes -> {
    assertEquals(bytes[0], 117);
    })
    .verifyComplete();
} | class FileAsyncApiTests extends FileShareTestBase {
// Per-test fixtures; re-initialized in setup() before each test.
private ShareFileAsyncClient primaryFileAsyncClient; // async client under test, bound to shareName/filePath
private ShareClient shareClient; // sync share client used for share-level setup (create, permissions)
private String shareName;
private String filePath;
private static Map<String, String> testMetadata;
private static ShareFileHttpHeaders httpHeaders;
private FileSmbProperties smbProperties;
// SDDL-format security descriptor used wherever a raw file permission is required.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
// Creates a fresh share per test and prepares (but does not create) the file client
// plus default metadata/headers/SMB properties used by the individual tests.
@BeforeEach
public void setup() {
shareName = generateShareName();
filePath = generatePathName();
shareClient = shareBuilderHelper(shareName).buildClient();
shareClient.create();
// The file itself is created lazily inside each test.
primaryFileAsyncClient = fileBuilderHelper(shareName, filePath).buildFileAsyncClient();
testMetadata = Collections.singletonMap("testmetadata", "value");
httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en")
    .setContentType("application/octet-stream");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL));
}
// The client's URL must match the one assembled from the account name, share and path.
@Test
public void getFileURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
    .getConnectionString()).getAccountName();
// NOTE(review): the format string below is truncated in this source (unterminated
// "https: literal) — restore the full expected-URL template before compiling.
String expectURL = String.format("https:
String fileURL = primaryFileAsyncClient.getFileUrl();
assertEquals(expectURL, fileURL);
}
// Creating a file with default options returns 201 Created.
@Test
public void createFile() {
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, null, null, null, null))
    .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// A negative size is rejected with 400 / OutOfRangeInput.
@Test
public void createFileError() {
StepVerifier.create(primaryFileAsyncClient.create(-1)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// Create with headers, SMB properties and a pre-created permission KEY; all SMB fields
// must come back populated.
@Test
public void createFileWithArgsFpk() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now())
    .setFilePermissionKey(filePermissionKey);
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, null,
    testMetadata)).assertNext(it -> {
    FileShareTestHelper.assertResponseStatusCode(it, 201);
    assertNotNull(it.getValue().getLastModified());
    assertNotNull(it.getValue().getSmbProperties());
    assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(it.getValue().getSmbProperties().getParentId());
    assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
// Same as above but passing the raw permission string instead of a key.
@Test
public void createFileWithArgsFp() {
smbProperties.setFileCreationTime(testResourceNamer.now())
    .setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION,
    testMetadata)).assertNext(it -> {
    FileShareTestHelper.assertResponseStatusCode(it, 201);
    assertNotNull(it.getValue().getLastModified());
    assertNotNull(it.getValue().getSmbProperties());
    assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(it.getValue().getSmbProperties().getParentId());
    assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
// A negative size fails even when other arguments are supplied.
@Test
public void createFileWithArgsError() {
StepVerifier.create(primaryFileAsyncClient.createWithResponse(-1, null, null, null, testMetadata))
    .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
    ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void createLease() {
    // Arrange: create the file and acquire a lease on it.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // Re-creating the file while supplying the active lease id must succeed.
    // FIX: the original chain ended at expectNextCount(1) with no terminal verify*()
    // call, so the StepVerifier never subscribed and the test asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
        null, null, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
// Re-creating a leased file with a wrong lease id is rejected by the service.
@Test
public void createLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
    null, null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
    .verifyError(ShareStorageException.class);
}
/*
 * Tests downloading a file using a default client that doesn't have an HttpClient passed to it.
 */
// Round-trips files of various sizes through upload-from-file / download-to-file and
// compares the bytes on disk.
// NOTE(review): the @EnabledIf condition string is truncated in this source — restore it.
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@ValueSource(ints = {
    0,
    20,
    16 * 1024 * 1024,
    8 * 1026 * 1024 + 10, // presumably a deliberately non-aligned size — confirm 1026 vs 1024
    50 * Constants.MB
})
public void downloadFileBufferCopy(int fileSize) throws IOException {
ShareServiceAsyncClient shareServiceAsyncClient = new ShareServiceClientBuilder()
    .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
    .buildAsyncClient();
ShareFileAsyncClient fileClient = shareServiceAsyncClient.getShareAsyncClient(shareName)
    .createFile(filePath, fileSize).block();
File file = FileShareTestHelper.getRandomFile(fileSize);
assertNotNull(fileClient);
fileClient.uploadFromFile(file.toPath().toString()).block();
File outFile = new File(generatePathName() + ".txt");
if (outFile.exists()) {
    assertTrue(outFile.delete());
}
fileClient.downloadToFile(outFile.toPath().toString()).block();
assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
// Best-effort cleanup of the share and the two local files.
shareServiceAsyncClient.deleteShare(shareName).block();
outFile.delete();
file.delete();
}
@Test
public void uploadAndDownloadData() {
    // Upload the default payload, then download it and verify headers plus content.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // FIX: the original built collectBytesInByteBufferStream(...).flatMap(...) inside
    // assertNext without subscribing to it, so the byte comparison never executed.
    // Collect the body within the pipeline (same pattern as uploadRangeFromURLOAuth)
    // so the assertion actually runs.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null)
        .flatMap(response -> {
            assertTrue((response.getStatusCode() == 200) || (response.getStatusCode() == 206));
            ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
            assertEquals(DATA.getDefaultDataSizeLong(), headers.getContentLength());
            assertNotNull(headers.getETag());
            assertNotNull(headers.getLastModified());
            assertNotNull(headers.getFilePermissionKey());
            assertNotNull(headers.getFileAttributes());
            assertNotNull(headers.getFileLastWriteTime());
            assertNotNull(headers.getFileCreationTime());
            assertNotNull(headers.getFileChangeTime());
            assertNotNull(headers.getFileParentId());
            assertNotNull(headers.getFileId());
            return FluxUtil.collectBytesInByteBufferStream(response.getValue());
        }))
        .assertNext(actualData -> assertArrayEquals(DATA.getDefaultBytes(), actualData))
        .verifyComplete();
}
@Test
public void uploadAndDownloadDataWithArgs() {
    // Upload at offset 1, then download exactly that range and verify the content.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()).setOffset(1L)))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // FIX: the byte comparison in the original was inside assertNext on an unsubscribed
    // flatMap chain and never executed; collect the body within the pipeline instead.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1,
        DATA.getDefaultDataSizeLong()), true)
        .flatMap(it -> {
            FileShareTestHelper.assertResponseStatusCode(it, 206);
            assertEquals(DATA.getDefaultDataSizeLong(), it.getDeserializedHeaders().getContentLength());
            return FluxUtil.collectBytesInByteBufferStream(it.getValue());
        }))
        .assertNext(actualData -> assertArrayEquals(DATA.getDefaultBytes(), actualData))
        .verifyComplete();
}
// Uploading to a never-created file fails with 404 / ResourceNotFound.
@Test
public void uploadDataError() {
StepVerifier.create(primaryFileAsyncClient.upload(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()))
    .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
    ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void uploadLease() {
    // Arrange: create the file and acquire a lease on it.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // Uploading with the active lease id must succeed and emit one response.
    // FIX: the original chain stopped at expectNextCount(1) with no terminal verify*(),
    // so the StepVerifier never subscribed and nothing was asserted.
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
        DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
        .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId))))
        .expectNextCount(1)
        .verifyComplete();
}
// Uploading with a wrong lease id is rejected by the service.
@Test
public void uploadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
    new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
    .setRequestConditions(new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid()))))
    .verifyError(ShareStorageException.class);
}
// Declared length differing from actual flux length must raise UnexpectedLengthException
// with a message indicating the direction of the mismatch.
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(long size, String errMsg) {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
    new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), size).setOffset(0L))).verifyErrorSatisfies(it -> {
    assertInstanceOf(UnexpectedLengthException.class, it);
    assertTrue(it.getMessage().contains(errMsg));
});
}
// (declared size, expected message fragment) pairs for uploadDataLengthMismatch.
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
return Stream.of(
    Arguments.of(6, "more than"),
    Arguments.of(8, "less than"));
}
// Downloading from a never-created file fails with 404 / ResourceNotFound.
@Test
public void downloadDataError() {
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 1023L), false))
    .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
    ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void downloadLease() {
    // Arrange: create the file and acquire a lease on it.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // Download with the active lease id must emit one response and complete.
    // FIX: the original never called a terminal verify*() so the pipeline never ran.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
        .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
// Download with a wrong lease id is rejected by the service.
@Test
public void downloadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
    .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// Upload data, clear the first 7 bytes, and verify the cleared region reads back as zeros.
@Test
public void uploadAndClearRange() {
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 0)).assertNext(it ->
        FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // BUG FIX: the original built a Mono via flatMap inside assertNext but never
    // subscribed to it, so the per-byte assertions never executed. Move the byte
    // collection into the verified pipeline so the assertions actually run.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 6L), false)
        .flatMap(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue())))
        .assertNext(data -> {
            for (byte b : data) {
                assertEquals(0, b); // cleared range must be all zeros (expected value first)
            }
        }).verifyComplete();
}
// Upload data, clear 7 bytes starting at offset 1, and verify that region reads back as zeros.
@Test
public void uploadAndClearRangeWithArgs() {
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 1)).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 201);
    }).verifyComplete();
    // BUG FIX: the original built a Mono via flatMap inside assertNext but never
    // subscribed to it, so the per-byte assertions never executed. Collect the bytes
    // inside the verified pipeline instead.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1, 7L), false)
        .flatMap(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue())))
        .assertNext(data -> {
            for (byte b : data) {
                assertEquals(0, b); // expected value first
            }
        }).verifyComplete();
    fullInfoData.clear();
}
// Clearing a range that extends past the end of the file must fail with 416 INVALID_RANGE.
@Test
public void clearRangeError() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRange(30)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
}
// Clearing a range at an offset that runs past the file size must fail with 416 INVALID_RANGE.
@Test
public void clearRangeErrorArgs() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 20)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
fullInfoData.clear();
}
// Clearing a range succeeds when the matching lease id is supplied.
@Test
public void clearRangeLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the original chain stopped at expectNextCount(1) with no terminal verify
    // step, so the StepVerifier never subscribed and the test asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
        .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
// Clearing a range with a non-matching lease id must fail with ShareStorageException.
@Test
public void clearRangeLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// Uploading from a local path that does not exist must surface NoSuchFileException as the cause.
@Test
public void uploadFileDoesNotExist() {
File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
// Make sure the file is absent before the call under test.
if (uploadFile.exists()) {
assertTrue(uploadFile.delete());
}
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile.getPath()))
.verifyErrorSatisfies(it -> assertInstanceOf(NoSuchFileException.class, it.getCause()));
uploadFile.delete();
}
// downloadToFile must refuse to overwrite an existing local file (FileAlreadyExistsException).
@Test
public void uploadAndDownloadFileExists() throws IOException {
String data = "Download file exists";
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
// Pre-create the destination so the download collides with it.
if (!downloadFile.exists()) {
assertTrue(downloadFile.createNewFile());
}
primaryFileAsyncClient.create(data.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
data.length()).block();
StepVerifier.create(
primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).verifyErrorSatisfies(it ->
assertInstanceOf(FileAlreadyExistsException.class, it.getCause()));
downloadFile.delete();
}
// Round-trip: upload text, download it to a fresh local file, and compare the contents.
@Test
public void uploadAndDownloadToFileDoesNotExist() throws FileNotFoundException {
    String data = "Download file does not exist";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    // Ensure the destination is absent so downloadToFile can create it.
    if (downloadFile.exists()) {
        assertTrue(downloadFile.delete());
    }
    primaryFileAsyncClient.create(data.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
        data.length()).block();
    StepVerifier.create(primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).assertNext(it ->
        assertEquals(data.length(), it.getContentLength())).verifyComplete();
    // IMPROVED: try-with-resources guarantees the Scanner is closed even when the
    // content assertion fails (the original leaked it on assertion failure).
    try (Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z")) {
        assertEquals(data, scanner.next());
    }
    downloadFile.delete();
}
// uploadFromFile succeeds when the matching lease id is supplied in the request conditions.
@Test
public void uploadFromFileLease() throws IOException {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(
uploadFile, new ShareRequestConditions().setLeaseId(leaseId))).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// uploadFromFile with a non-matching lease id must fail with ShareStorageException.
@Test
public void uploadFromFileLeaseFail() throws IOException {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// downloadToFile succeeds when the matching lease id is supplied in the request conditions.
@Test
public void downloadToFileLease() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
downloadFile.delete();
}
// downloadToFile with a non-matching lease id must fail with ShareStorageException.
@Test
public void downloadToFileLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
downloadFile.delete();
}
@Disabled("Groovy version of this test was not asserting contents of result properly. Need to revisit this test.")
// Copies a 5-byte range from a SAS-authorized source file into a destination file and
// verifies the copied bytes land at the destination offset. Currently disabled (see above).
@Test
public void uploadRangeFromURL() {
primaryFileAsyncClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
long sourceOffset = 5;
int length = 5;
long destinationOffset = 0;
primaryFileAsyncClient.upload(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
// Read-only SAS scoped to the source file so the service can fetch the source range.
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(primaryFileAsyncClient.getShareName())
.setFilePath(primaryFileAsyncClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
.endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
client.create(1024).block();
client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileAsyncClient.getFileUrl()
+ "?" + sasToken).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(client.download())).assertNext(it -> {
String result = new String(it);
for (int i = 0; i < length; i++) {
assertEquals(result.charAt((int) (destinationOffset + i)), data.charAt((int) (sourceOffset + i)));
}
}).verifyComplete();
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
@Test
public void uploadRangeFromURLLease() {
primaryFileAsyncClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
long sourceOffset = 5;
int length = 5;
long destinationOffset = 0;
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(primaryFileAsyncClient.getShareName())
.setFilePath(primaryFileAsyncClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
.endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
client.create(1024).block();
String leaseId = createLeaseClient(client).acquireLease().block();
StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
primaryFileAsyncClient.getFileUrl() + "?" + sasToken, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
}
// uploadRangeFromUrlWithResponse with a non-matching destination lease id must fail.
@Test
public void uploadRangeFromURLLeaseFail() {
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    // Read-only SAS scoped to the source file so the service can fetch the source range.
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    createLeaseClient(client).acquireLease().block();
    // CLEANUP: dropped the redundant .toString() on getFileUrl(), which sibling tests
    // already use directly in string concatenation.
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken,
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
// Server-side copy from the file to itself: the poller must report a non-null copy id.
@Test
public void startCopy() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
new ShareFileCopyOptions(), getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId()))
.expectComplete().verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, smbProperties,
setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null,
getPollingDuration(1000), null);
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
// NOTE(review): despite the name this test asserts a successful copy, not an error —
// looks like the body was copied from startCopy; revisit when re-enabling.
@Test
public void startCopyError() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
new ShareFileCopyOptions(), getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
// beginCopy succeeds when the matching destination lease id is supplied.
@Test
public void startCopyLease() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
false, false, null, getPollingDuration(1000), new ShareRequestConditions().setLeaseId(leaseId));
StepVerifier.create(poller).assertNext(it -> {
assertNotNull(it.getValue().getCopyId());
}).expectComplete().verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
// NOTE(review): despite the "Fail" name this test expects the copy to SUCCEED with a
// random (non-matching) lease id — it should presumably verify an error instead;
// revisit when re-enabling.
@Test
public void startCopyLeaseFail() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
false, false, null, getPollingDuration(1000), new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(setFilePermission ? FILE_PERMISSION : null)
.setIgnoreReadOnly(ignoreReadOnly)
.setArchiveAttribute(setArchiveAttribute)
.setPermissionCopyModeType(permissionType);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
// beginCopy with ignoreReadOnly + archiveAttribute flags set must still produce a copy id.
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setIgnoreReadOnly(true)
.setArchiveAttribute(true);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsFilePermission() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsChangeTime() {
ShareFileInfo client = primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
smbProperties.setFileChangeTime(testResourceNamer.now());
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
Objects.requireNonNull(primaryFileAsyncClient.getProperties().block()).getSmbProperties()
.getFileChangeTime());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs)
.setFilePermissionKey(filePermissionKey);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
// beginCopy succeeds when the matching destination lease id is set on the copy options.
@Test
public void startCopyWithOptionsLease() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
}
// beginCopy with a lease id for a lease that was never acquired must throw on subscription.
@Test
public void startCopyWithOptionsInvalidLease() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = testResourceNamer.randomUuid();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions);
// blockFirst() forces subscription so the service error surfaces as an exception here.
assertThrows(ShareStorageException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)).blockFirst());
}
// beginCopy with destination metadata configured must still produce a copy id.
@Test
public void startCopyWithOptionsMetadata() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setMetadata(testMetadata);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
primaryFileAsyncClient.create(1024).block();
ShareFileProperties initialProperties = primaryFileAsyncClient.getProperties().block();
assertNotNull(initialProperties);
OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(true)
.setLastWrittenOn(true)
.setChangedOn(true)
.setFileAttributes(true);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions)
.setSmbPropertiesToCopy(list);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getFileLastWriteTime());
FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getFileChangeTime());
assertEquals(fileAttributes, resultProperties.getNtfsFileAttributes());
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
boolean fileAttributes) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(createdOn)
.setLastWrittenOn(lastWrittenOn)
.setChangedOn(changedOn)
.setFileAttributes(fileAttributes);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFileChangeTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
.setSmbPropertiesToCopy(list);
assertThrows(IllegalArgumentException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)));
}
@Disabled("TODO: Need to find a way of mocking pending copy status")
// Intentionally empty placeholder: abortCopy requires a copy that is still pending,
// which cannot currently be produced reliably in tests.
@Test
public void abortCopy() {
}
// Deleting an existing file must return HTTP 202 Accepted.
@Test
public void deleteFile() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse()).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Deleting a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
@Test
public void deleteFileError() {
StepVerifier.create(primaryFileAsyncClient.delete()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
// Delete succeeds (202) when the matching lease id is supplied in the request conditions.
@Test
public void deleteFileLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(new ShareRequestConditions().setLeaseId(leaseId)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Delete with a non-matching lease id must fail with ShareStorageException.
@Test
public void deleteFileLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// deleteIfExists on an existing file must return HTTP 202 Accepted.
@Test
public void deleteIfExistsFile() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(null)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// deleteIfExists on a file that never existed returns a 404 response carrying `false`
// instead of throwing, and the file still does not exist afterwards.
@Test
public void deleteFileThatDoesNotExist() {
    ShareFileAsyncClient client = primaryFileAsyncClient.getFileAsyncClient(generateShareName());
    Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    // FIX (test hygiene): JUnit's assertEquals takes (expected, actual); the original
    // swapped them, which yields misleading failure messages.
    assertEquals(404, response.getStatusCode());
    assertNotEquals(Boolean.TRUE, client.exists().block());
}
// First deleteIfExists returns true (deleted); a second call returns false (already gone).
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null, null, null).block();
assertEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
assertNotEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
}
// deleteIfExists succeeds (202) when the matching lease id is supplied.
@Test
public void deleteIfExistsFileLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(new ShareRequestConditions()
.setLeaseId(leaseId))).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
.verifyComplete();
}
// deleteIfExists with a non-matching lease id must fail with ShareStorageException.
@Test
public void deleteIfExistsFileLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// getPropertiesWithResponse must return 200 with all standard and SMB properties populated.
@Test
public void getProperties() {
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 200);
        assertNotNull(it.getValue().getETag());
        // FIX: the original asserted getLastModified() twice; dropped the duplicate line.
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
// getProperties succeeds when the matching lease id is supplied in the request conditions.
@Test
public void getPropertiesLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// getProperties with a non-matching lease id must fail with ShareStorageException.
@Test
public void getPropertiesLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// getProperties on a file that was never created must fail with ShareStorageException.
@Test
public void getPropertiesError() {
StepVerifier.create(primaryFileAsyncClient.getProperties())
.verifyErrorSatisfies(it -> assertInstanceOf(ShareStorageException.class, it));
}
// setProperties using a file-permission KEY: 200 response with all SMB properties populated.
@Test
public void setHttpHeadersFpk() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
// Permission is supplied via the key on smbProperties, so the permission argument is null.
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null))
.assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
// setProperties using a raw file PERMISSION string: 200 response with SMB properties populated.
@Test
public void setHttpHeadersFp() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
FILE_PERMISSION)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
@Test
public void setHttpHeadersLease() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void setHttpHeadersLeaseFail() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
@Test
public void setHttpHeadersError() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
StepVerifier.create(primaryFileAsyncClient.setProperties(-1, null, null, null)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// Metadata set at creation is readable, and setMetadata replaces it wholesale.
@Test
public void setMetadata() {
primaryFileAsyncClient.createWithResponse(1024, httpHeaders, null, null, testMetadata).block();
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
StepVerifier.create(primaryFileAsyncClient.getProperties()).assertNext(it ->
assertEquals(testMetadata, it.getMetadata())).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(updatedMetadata))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.getProperties()).assertNext(it ->
assertEquals(updatedMetadata, it.getMetadata())).verifyComplete();
}
// An empty metadata key is rejected with 400 EMPTY_METADATA_KEY.
@Test
public void setMetadataError() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> errorMetadata = Collections.singletonMap("", "value");
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(errorMetadata))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
// setMetadata succeeds with the matching lease id.
@Test
public void setMetadataLease() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> metadata = Collections.singletonMap("key", "value");
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// setMetadata fails with a non-matching lease id.
@Test
public void setMetadataLeaseFail() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> metadata = Collections.singletonMap("key", "value");
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// Uploading 1024 random bytes yields a single range [0, 1023].
@Test
public void listRanges() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
StepVerifier.create(primaryFileAsyncClient.listRanges()).assertNext(it -> {
assertEquals(0, it.getStart());
assertEquals(1023, it.getEnd());
}).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// A narrower ShareFileRange restricts the reported range to [0, 511].
@Test
public void listRangesWithRange() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
StepVerifier.create(primaryFileAsyncClient.listRanges(new ShareFileRange(0, 511L)))
.assertNext(it -> {
assertEquals(0, it.getStart());
assertEquals(511, it.getEnd());
}).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// listRanges succeeds with the matching lease id.
@Test
public void listRangesLease() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.listRanges(null, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// listRanges fails with a non-matching lease id.
@Test
public void listRangesLeaseFail() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.listRanges(null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Snapshot the share, mutate/clear ranges, then diff against the snapshot and compare the
// reported updated and cleared ranges against the parameterized expectations.
// NOTE(review): the @DisabledIf and @MethodSource string literals below are truncated in this
// copy of the file (missing the "#method" suffix and closing quote) — restore them from upstream.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
String snapshotId = primaryFileAsyncClient.create(4 * Constants.MB)
.then(primaryFileAsyncClient.upload(Flux.just(FileShareTestHelper.getRandomByteBuffer(4 * Constants.MB)),
4 * Constants.MB))
.then(primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName())
.createSnapshot()
.map(ShareSnapshotInfo::getSnapshot))
.block();
Flux.fromIterable(rangesToUpdate)
.flatMap(it -> {
int size = (int) (it.getEnd() - it.getStart() + 1);
return primaryFileAsyncClient.uploadWithResponse(Flux.just(
FileShareTestHelper.getRandomByteBuffer(size)), size, it.getStart());
}).blockLast();
Flux.fromIterable(rangesToClear)
.flatMap(it -> primaryFileAsyncClient.clearRangeWithResponse(it.getEnd() - it.getStart() + 1,
it.getStart()))
.blockLast();
StepVerifier.create(primaryFileAsyncClient.listRangesDiff(snapshotId)).assertNext(it -> {
assertEquals(it.getRanges().size(), expectedRanges.size());
assertEquals(it.getClearRanges().size(), expectedClearRanges.size());
for (int i = 0; i < expectedRanges.size(); i++) {
FileRange actualRange = it.getRanges().get(i);
FileRange expectedRange = expectedRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
for (int i = 0; i < expectedClearRanges.size(); i++) {
ClearRange actualRange = it.getClearRanges().get(i);
ClearRange expectedRange = expectedClearRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
}).verifyComplete();
}
// A freshly created file has no open handles, so listing completes with no items.
@Test
public void listHandles() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.listHandles()).verifyComplete();
}
// Same as above, exercising the maxResultsPerPage overload.
@Test
public void listHandlesWithMaxResult() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.listHandles(2)).verifyComplete();
}
// Force-closing a handle id that is not open is a no-op (0 closed, 0 failed).
// NOTE(review): the @DisabledIf string literal is truncated in this copy of the file — restore
// the full "com.azure.storage.file.share.FileShareTestBase#..." condition from upstream.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseHandleMin() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("1")).assertNext(it -> {
assertEquals(it.getClosedHandles(), 0);
assertEquals(it.getFailedHandles(), 0);
}).verifyComplete();
}
// A malformed handle id is rejected by the service.
@Test
public void forceCloseHandleInvalidHandleID() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("invalidHandleId"))
.verifyErrorSatisfies(it -> assertInstanceOf(ShareStorageException.class, it));
}
// Force-closing all handles on a file with none open reports 0 closed / 0 failed.
// NOTE(review): truncated @DisabledIf string literal — restore from upstream.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseAllHandlesMin() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseAllHandles())
.assertNext(it -> {
assertEquals(it.getClosedHandles(), 0);
assertEquals(it.getFailedHandles(), 0);
}).verifyComplete();
}
// A client built with an explicit snapshot timestamp must echo that exact id back.
@Test
public void getSnapshotId() {
OffsetDateTime snapshotTime = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC);
String expectedSnapshot = snapshotTime.toString();
ShareFileAsyncClient snapshotClient = fileBuilderHelper(shareName, filePath)
.snapshot(expectedSnapshot)
.buildFileAsyncClient();
assertEquals(expectedSnapshot, snapshotClient.getShareSnapshotId());
}
// The client reports the share name it was built with.
@Test
public void getShareName() {
assertEquals(shareName, primaryFileAsyncClient.getShareName());
}
// The client reports the file path it was built with.
@Test
public void getFilePath() {
assertEquals(filePath, primaryFileAsyncClient.getFilePath());
}
}
class FileAsyncApiTests extends FileShareTestBase {
// Async client under test, re-created per test in setup().
private ShareFileAsyncClient primaryFileAsyncClient;
// Sync client for the containing share (used for share-level operations such as createPermission).
private ShareClient shareClient;
// Share/file names randomized per test run.
private String shareName;
private String filePath;
// Shared fixtures reused across tests.
private static Map<String, String> testMetadata;
private static ShareFileHttpHeaders httpHeaders;
private FileSmbProperties smbProperties;
// SDDL permission string used by the file-permission tests.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL"
// Creates a fresh share and file client plus default metadata/headers/SMB properties per test.
@BeforeEach
public void setup() {
shareName = generateShareName();
filePath = generatePathName();
shareClient = shareBuilderHelper(shareName).buildClient();
shareClient.create();
primaryFileAsyncClient = fileBuilderHelper(shareName, filePath).buildFileAsyncClient();
testMetadata = Collections.singletonMap("testmetadata", "value");
httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en")
.setContentType("application/octet-stream");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL));
}
// The client's URL is the account endpoint plus share and file path.
// NOTE(review): the String.format pattern literal on the expectURL line is truncated in this
// copy of the file (it should be the full "https://%s.file.core.windows.net/%s/%s" template
// with its arguments) — restore it from upstream.
@Test
public void getFileURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
String fileURL = primaryFileAsyncClient.getFileUrl();
assertEquals(expectURL, fileURL);
}
// Basic create returns 201.
@Test
public void createFile() {
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, null, null, null, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// Negative size is rejected with 400 OUT_OF_RANGE_INPUT.
@Test
public void createFileError() {
StepVerifier.create(primaryFileAsyncClient.create(-1)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// Create with a server-registered permission key; SMB properties must be populated in the response.
@Test
public void createFileWithArgsFpk() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, null,
testMetadata)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
assertNotNull(it.getValue().getLastModified());
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
// Create passing the raw permission string instead of a key.
@Test
public void createFileWithArgsFp() {
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION,
testMetadata)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
assertNotNull(it.getValue().getLastModified());
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
// Negative size with args is rejected with 400 OUT_OF_RANGE_INPUT.
@Test
public void createFileWithArgsError() {
StepVerifier.create(primaryFileAsyncClient.createWithResponse(-1, null, null, null, testMetadata))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// Re-creating (resizing) a leased file succeeds when the matching lease id is supplied.
@Test
public void createLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the original chain ended at expectNextCount(1) with no verifyComplete(), so the
    // StepVerifier was never subscribed and the test asserted nothing. verify* triggers the
    // subscription and enforces the expectations.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
        null, null, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
// Re-creating a leased file with a random (non-matching) lease id fails.
@Test
public void createLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
null, null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
/*
 * Tests downloading a file using a default client that doesn't have an HttpClient passed to it.
 */
// Round-trips files of various sizes through upload-from-file / download-to-file and compares bytes.
// NOTE(review): the @EnabledIf string literal is truncated in this copy of the file — restore
// the full "com.azure.storage.file.share.FileShareTestBase#..." condition from upstream.
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@ValueSource(ints = {
0,
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
})
public void downloadFileBufferCopy(int fileSize) throws IOException {
ShareServiceAsyncClient shareServiceAsyncClient = new ShareServiceClientBuilder()
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.buildAsyncClient();
ShareFileAsyncClient fileClient = shareServiceAsyncClient.getShareAsyncClient(shareName)
.createFile(filePath, fileSize).block();
File file = FileShareTestHelper.getRandomFile(fileSize);
assertNotNull(fileClient);
fileClient.uploadFromFile(file.toPath().toString()).block();
File outFile = new File(generatePathName() + ".txt");
if (outFile.exists()) {
assertTrue(outFile.delete());
}
fileClient.downloadToFile(outFile.toPath().toString()).block();
assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
shareServiceAsyncClient.deleteShare(shareName).block();
outFile.delete();
file.delete();
}
// Upload the default payload, then download and check headers and content length.
@Test
public void uploadAndDownloadData() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null)).assertNext(response -> {
assertTrue((response.getStatusCode() == 200) || (response.getStatusCode() == 206));
ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
assertEquals(DATA.getDefaultDataSizeLong(), headers.getContentLength());
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
// NOTE(review): this Mono is never subscribed, so the assertArrayEquals inside it never
// runs — the body comparison is a dead assertion. Consider collecting the bytes with a
// separate StepVerifier so the check actually executes.
FluxUtil.collectBytesInByteBufferStream(response.getValue())
.flatMap(actualData -> {
assertArrayEquals(DATA.getDefaultBytes(), actualData);
return Mono.empty();
});
}).verifyComplete();
}
// Upload at offset 1 and download that range back with rangeGetContentMd5 = true.
@Test
public void uploadAndDownloadDataWithArgs() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()).setOffset(1L)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1,
DATA.getDefaultDataSizeLong()), true)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 206);
assertEquals(DATA.getDefaultDataSizeLong(), it.getDeserializedHeaders().getContentLength());
// NOTE(review): as above, this Mono is never subscribed — the byte comparison never runs.
FluxUtil.collectBytesInByteBufferStream(it.getValue()).flatMap(actualData -> {
assertArrayEquals(DATA.getDefaultBytes(), actualData);
return Mono.empty();
});
}).verifyComplete();
}
// Uploading to a file that was never created fails with 404 RESOURCE_NOT_FOUND.
@Test
public void uploadDataError() {
StepVerifier.create(primaryFileAsyncClient.upload(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
// Uploading a range succeeds when the matching lease id is supplied.
@Test
public void uploadLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the original chain ended at expectNextCount(1) with no verifyComplete(), so the
    // StepVerifier was never subscribed and the upload was never actually verified.
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
        DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
        .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId))))
        .expectNextCount(1)
        .verifyComplete();
}
// Uploading with a non-matching lease id fails.
@Test
public void uploadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
.setRequestConditions(new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid()))))
.verifyError(ShareStorageException.class);
}
// Declaring a length different from the actual payload yields UnexpectedLengthException whose
// message says "more than" (declared too small) or "less than" (declared too large).
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(long size, String errMsg) {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), size).setOffset(0L))).verifyErrorSatisfies(it -> {
assertInstanceOf(UnexpectedLengthException.class, it);
assertTrue(it.getMessage().contains(errMsg));
});
}
// (declared length, expected error-message fragment) pairs for uploadDataLengthMismatch:
// 6 declares fewer bytes than supplied ("more than"), 8 declares more ("less than").
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
    Arguments declaredTooSmall = Arguments.of(6, "more than");
    Arguments declaredTooLarge = Arguments.of(8, "less than");
    return Stream.of(declaredTooSmall, declaredTooLarge);
}
// Downloading from a file that was never created fails with 404 RESOURCE_NOT_FOUND.
@Test
public void downloadDataError() {
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 1023L), false))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
// Downloading succeeds when the matching lease id is supplied.
@Test
public void downloadLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the original chain ended at expectNextCount(1) with no verifyComplete(), so the
    // StepVerifier was never subscribed and the download was never actually verified.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
        .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
// Downloading with a non-matching lease id fails.
@Test
public void downloadLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// Clearing the first 7 bytes should leave zeros in [0, 6].
@Test
public void uploadAndClearRange() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 0)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
// NOTE(review): the Mono produced inside assertNext is never subscribed, so the per-byte
// zero checks never run — only completion of the download itself is verified here.
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 6L), false))
.assertNext(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue()).flatMap(data -> {
for (byte b : data) {
assertEquals(b, 0);
}
return Mono.empty();
})).verifyComplete();
}
// Clearing 7 bytes starting at offset 1 should zero [1, 7].
@Test
public void uploadAndClearRangeWithArgs() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 1)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
}).verifyComplete();
// NOTE(review): same dead-assertion pattern — the inner Mono is never subscribed.
StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1, 7L), false))
.assertNext(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue()).flatMap(data -> {
for (byte b : data) {
assertEquals(b, 0);
}
return Mono.empty();
})).verifyComplete();
fullInfoData.clear();
}
// Clearing past the end of the file fails with 416 INVALID_RANGE.
@Test
public void clearRangeError() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRange(30)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
}
// Clearing a range whose offset is past the end also fails with 416 INVALID_RANGE.
@Test
public void clearRangeErrorArgs() {
String fullInfoString = "please clear the range";
ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
primaryFileAsyncClient.create(fullInfoString.length()).block();
primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 20)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
fullInfoData.clear();
}
// Clearing a range succeeds when the matching lease id is supplied.
@Test
public void clearRangeLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the original chain ended at expectNextCount(1) with no verifyComplete(), so the
    // StepVerifier was never subscribed and the clear was never actually verified.
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
        .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
// Clearing a range with a non-matching lease id fails.
@Test
public void clearRangeLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// uploadFromFile on a missing local file surfaces NoSuchFileException as the cause.
@Test
public void uploadFileDoesNotExist() {
File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
if (uploadFile.exists()) {
assertTrue(uploadFile.delete());
}
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile.getPath()))
.verifyErrorSatisfies(it -> assertInstanceOf(NoSuchFileException.class, it.getCause()));
uploadFile.delete();
}
// downloadToFile refuses to overwrite an existing local file (FileAlreadyExistsException cause).
@Test
public void uploadAndDownloadFileExists() throws IOException {
String data = "Download file exists";
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
if (!downloadFile.exists()) {
assertTrue(downloadFile.createNewFile());
}
primaryFileAsyncClient.create(data.length()).block();
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
data.length()).block();
StepVerifier.create(
primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).verifyErrorSatisfies(it ->
assertInstanceOf(FileAlreadyExistsException.class, it.getCause()));
downloadFile.delete();
}
// downloadToFile writes a fresh local file and the downloaded text matches what was uploaded.
@Test
public void uploadAndDownloadToFileDoesNotExist() throws FileNotFoundException {
    String data = "Download file does not exist";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (downloadFile.exists()) {
        assertTrue(downloadFile.delete());
    }
    primaryFileAsyncClient.create(data.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
        data.length()).block();
    StepVerifier.create(primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).assertNext(it ->
        assertEquals(it.getContentLength(), data.length())).verifyComplete();
    // FIX: try-with-resources — the original leaked the Scanner (and its file handle) whenever
    // the assertEquals below threw, because close() was only reached on success.
    try (Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z")) {
        assertEquals(data, scanner.next());
    }
    downloadFile.delete();
}
// uploadFromFile succeeds when the matching lease id is supplied.
@Test
public void uploadFromFileLease() throws IOException {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(
uploadFile, new ShareRequestConditions().setLeaseId(leaseId))).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// uploadFromFile fails with a non-matching lease id.
@Test
public void uploadFromFileLeaseFail() throws IOException {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// downloadToFile succeeds when the matching lease id is supplied.
@Test
public void downloadToFileLease() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
downloadFile.delete();
}
// downloadToFile fails with a non-matching lease id.
@Test
public void downloadToFileLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
DATA.getDefaultDataSizeLong())).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
downloadFile.toPath().toString(), null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
downloadFile.delete();
}
// Copies a source range to a destination file via a read-only SAS URL, then verifies the
// overwritten bytes match the source range.
@Disabled("Groovy version of this test was not asserting contents of result properly. Need to revisit this test.")
@Test
public void uploadRangeFromURL() {
primaryFileAsyncClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
long sourceOffset = 5;
int length = 5;
long destinationOffset = 0;
primaryFileAsyncClient.upload(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(primaryFileAsyncClient.getShareName())
.setFilePath(primaryFileAsyncClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
.endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
client.create(1024).block();
client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileAsyncClient.getFileUrl()
+ "?" + sasToken).block();
StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(client.download())).assertNext(it -> {
String result = new String(it);
for (int i = 0; i < length; i++) {
assertEquals(result.charAt((int) (destinationOffset + i)), data.charAt((int) (sourceOffset + i)));
}
}).verifyComplete();
}
// uploadRangeFromUrl on a leased destination succeeds with the matching lease id.
// NOTE(review): the @DisabledIf string literal is truncated in this copy of the file — restore
// the full "com.azure.storage.file.share.FileShareTestBase#..." condition from upstream.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
// BUG FIX: removed a duplicated @Test annotation. JUnit 5's @Test is not @Repeatable, so two
// @Test annotations on the same method are a compile error.
@Test
public void uploadRangeFromURLLease() {
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    // Read-only SAS so the destination's copy-from-URL can read the source file.
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    String leaseId = createLeaseClient(client).acquireLease().block();
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1).verifyComplete();
}
// uploadRangeFromUrl on a leased destination fails with a non-matching lease id.
@Test
public void uploadRangeFromURLLeaseFail() {
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    // Read-only SAS so the destination's copy-from-URL can read the source file.
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    createLeaseClient(client).acquireLease().block();
    // CLEANUP: dropped the redundant .toString() — getFileUrl() already returns a String
    // (matching every other call site in this class).
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken,
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
@Test
public void startCopy() {
    // Starting a copy of the file onto itself hands back a non-null copy id.
    primaryFileAsyncClient.create(1024).block();
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(
        primaryFileAsyncClient.getFileUrl(), new ShareFileCopyOptions(), getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
    boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
    // beginCopy with the full argument list (SMB props, permission, read-only/archive flags)
    // completes and reports a copy id for every parameter combination.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    // Register the permission on the share up front; only attach its key when requested.
    String permissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now()).setFileLastWriteTime(testResourceNamer.now());
    if (setFilePermissionKey) {
        smbProperties.setFilePermissionKey(permissionKey);
    }
    String filePermission = setFilePermission ? FILE_PERMISSION : null;
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL, smbProperties,
        filePermission, permissionType, ignoreReadOnly, setArchiveAttribute, null, getPollingDuration(1000), null);
    StepVerifier.create(copyPoller)
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
@Test
// NOTE(review): despite its name this test asserts a successful copy and is byte-identical to
// startCopy(); no error scenario is exercised. Confirm whether a failing-source case was intended
// before re-enabling.
public void startCopyError() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
new ShareFileCopyOptions(), getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
    + "between the time subscribed and the time we start observing events.")
@Test
public void startCopyLease() {
    // beginCopy over a leased destination succeeds when the matching lease id is provided.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
        false, false, null, getPollingDuration(1000), conditions);
    StepVerifier.create(copyPoller)
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
+ "between the time subscribed and the time we start observing events.")
@Test
// NOTE(review): this test passes a random (non-matching) lease id yet asserts the copy SUCCEEDS.
// For a "*Fail" test one would expect verifyError(ShareStorageException.class); confirm the
// intended behavior before re-enabling.
public void startCopyLeaseFail() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
false, false, null, getPollingDuration(1000), new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
    boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
    // Same matrix as startCopyWithArgs, but driven through ShareFileCopyOptions.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String permissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now()).setFileLastWriteTime(testResourceNamer.now());
    if (setFilePermissionKey) {
        smbProperties.setFilePermissionKey(permissionKey);
    }
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(setFilePermission ? FILE_PERMISSION : null)
        .setIgnoreReadOnly(ignoreReadOnly)
        .setArchiveAttribute(setArchiveAttribute)
        .setPermissionCopyModeType(permissionType);
    StepVerifier.create(primaryFileAsyncClient.beginCopy(sourceURL, copyOptions, getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
    // Copy with ignoreReadOnly and archiveAttribute both enabled completes successfully.
    primaryFileAsyncClient.create(1024).block();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setIgnoreReadOnly(true)
        .setArchiveAttribute(true);
    StepVerifier.create(primaryFileAsyncClient.beginCopy(primaryFileAsyncClient.getFileUrl(), copyOptions,
            getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsFilePermission() {
    // Copying with an OVERRIDE permission and explicit SMB properties must carry those
    // properties onto the destination file.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    smbProperties
        .setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE));
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    StepVerifier.create(primaryFileAsyncClient.beginCopy(sourceURL, copyOptions, getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
    // The destination's SMB properties should reflect what was requested on the copy.
    FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
        .getSmbProperties();
    FileShareTestHelper.compareDatesWithPrecision(resultProperties.getFileCreationTime(),
        smbProperties.getFileCreationTime());
    FileShareTestHelper.compareDatesWithPrecision(resultProperties.getFileLastWriteTime(),
        smbProperties.getFileLastWriteTime());
    assertEquals(resultProperties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsChangeTime() {
    // Copying with an explicit fileChangeTime must propagate that time to the destination.
    // FIX: the original assigned create()'s result to an unused local (ShareFileInfo client);
    // the value was never read, so the assignment has been dropped.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    smbProperties.setFileChangeTime(testResourceNamer.now());
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
        getPollingDuration(1000));
    StepVerifier.create(poller).assertNext(it ->
        assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
    FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
        Objects.requireNonNull(primaryFileAsyncClient.getProperties().block()).getSmbProperties()
            .getFileChangeTime());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
    // Copying with a server-registered permission key plus SMB properties must carry them
    // onto the destination file.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String permissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties
        .setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE))
        .setFilePermissionKey(permissionKey);
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    StepVerifier.create(primaryFileAsyncClient.beginCopy(sourceURL, copyOptions, getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
    FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
        .getSmbProperties();
    FileShareTestHelper.compareDatesWithPrecision(resultProperties.getFileCreationTime(),
        smbProperties.getFileCreationTime());
    FileShareTestHelper.compareDatesWithPrecision(resultProperties.getFileLastWriteTime(),
        smbProperties.getFileLastWriteTime());
    assertEquals(resultProperties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@Test
public void startCopyWithOptionsLease() {
    // Options-based copy onto a leased destination succeeds with the matching lease id.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setDestinationRequestConditions(new ShareRequestConditions().setLeaseId(leaseId));
    StepVerifier.create(primaryFileAsyncClient.beginCopy(sourceURL, copyOptions, getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsInvalidLease() {
    // A copy whose destination conditions carry a bogus lease id is rejected by the service.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    ShareRequestConditions badLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions().setDestinationRequestConditions(badLease);
    assertThrows(ShareStorageException.class,
        () -> primaryFileAsyncClient.beginCopy(sourceURL, copyOptions, getPollingDuration(1000)).blockFirst());
}
@Test
public void startCopyWithOptionsMetadata() {
    // A copy carrying metadata in its options completes and reports a copy id.
    primaryFileAsyncClient.create(1024).block();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions().setMetadata(testMetadata);
    StepVerifier.create(primaryFileAsyncClient.beginCopy(primaryFileAsyncClient.getFileUrl(), copyOptions,
            getPollingDuration(1000)))
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
// Verifies that a copy configured to carry over the SOURCE's SMB properties (created-on,
// last-written, changed-on, attributes) leaves the destination with the original values.
// The source's properties are captured BEFORE the copy so they can be compared afterwards.
public void startCopyWithOptionsWithOriginalSmbProperties() {
primaryFileAsyncClient.create(1024).block();
ShareFileProperties initialProperties = primaryFileAsyncClient.getProperties().block();
assertNotNull(initialProperties);
// Snapshot the pre-copy SMB values; these are the expected post-copy values.
OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
// Request that all four SMB properties be copied from the source rather than reset.
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(true)
.setLastWrittenOn(true)
.setChangedOn(true)
.setFileAttributes(true);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions)
.setSmbPropertiesToCopy(list);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
// Post-copy properties must match the values captured before the copy.
FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getFileLastWriteTime());
FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getFileChangeTime());
assertEquals(fileAttributes, resultProperties.getNtfsFileAttributes());
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
    boolean fileAttributes) {
    // Requesting "copy from source" for an SMB property while ALSO supplying an explicit value
    // for it is contradictory, so beginCopy must reject the combination client-side.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    CopyableFileSmbPropertiesList copyList = new CopyableFileSmbPropertiesList()
        .setCreatedOn(createdOn)
        .setLastWrittenOn(lastWrittenOn)
        .setChangedOn(changedOn)
        .setFileAttributes(fileAttributes);
    smbProperties
        .setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFileChangeTime(testResourceNamer.now())
        .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE));
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
        .setSmbPropertiesToCopy(copyList);
    assertThrows(IllegalArgumentException.class,
        () -> primaryFileAsyncClient.beginCopy(sourceURL, copyOptions, getPollingDuration(1000)));
}
@Disabled("TODO: Need to find a way of mocking pending copy status")
@Test
// Placeholder: abortCopy can only be exercised against a copy that is still pending, which
// cannot be produced deterministically against the live service; disabled until the pending
// status can be mocked.
public void abortCopy() {
}
@Test
public void deleteFile() {
    // Deleting an existing file returns 202 Accepted.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse())
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileError() {
    // Deleting a file that was never created fails with 404 RESOURCE_NOT_FOUND.
    StepVerifier.create(primaryFileAsyncClient.delete())
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404,
            ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void deleteFileLease() {
    // Deleting a leased file succeeds when the matching lease id is supplied.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(conditions))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileLeaseFail() {
    // Deleting a leased file with a non-matching lease id is rejected.
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void deleteIfExistsFile() {
    // deleteIfExists on an existing file behaves like delete: 202 Accepted.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(null))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileThatDoesNotExist() {
    // deleteIfExists on a missing file reports 404, value=false, and does not create it.
    ShareFileAsyncClient client = primaryFileAsyncClient.getFileAsyncClient(generateShareName());
    Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    // FIX: JUnit's assertEquals takes (expected, actual); the original had them swapped,
    // which produces a misleading failure message.
    assertEquals(404, response.getStatusCode());
    assertNotEquals(Boolean.TRUE, client.exists().block());
}
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
    // First deleteIfExists reports true (deleted); the second reports the file is gone.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null, null, null).block();
    Boolean firstDelete = primaryFileAsyncClient.deleteIfExists().block();
    Boolean secondDelete = primaryFileAsyncClient.deleteIfExists().block();
    assertEquals(Boolean.TRUE, firstDelete);
    assertNotEquals(Boolean.TRUE, secondDelete);
}
@Test
public void deleteIfExistsFileLease() {
    // deleteIfExists on a leased file succeeds with the matching lease id.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(conditions))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteIfExistsFileLeaseFail() {
    // deleteIfExists on a leased file with a non-matching lease id is rejected.
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void getProperties() {
    // getProperties on an existing file returns 200 with a fully-populated property set,
    // including every SMB property.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 200);
        assertNotNull(it.getValue().getETag());
        // FIX: the original asserted getLastModified() twice on consecutive lines; the
        // duplicate has been removed.
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
@Test
public void getPropertiesLease() {
    // getProperties on a leased file succeeds with the matching lease id.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(conditions))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void getPropertiesLeaseFail() {
    // getProperties on a leased file with a non-matching lease id is rejected.
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void getPropertiesError() {
    // getProperties on a file that was never created surfaces a ShareStorageException.
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .verifyErrorSatisfies(error -> assertInstanceOf(ShareStorageException.class, error));
}
@Test
public void setHttpHeadersFpk() {
    // setProperties with a server-registered file-permission KEY returns 200 and a fully
    // populated SMB property set.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String permissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(permissionKey);
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 200);
            FileSmbProperties smb = response.getValue().getSmbProperties();
            assertNotNull(smb);
            assertNotNull(smb.getFilePermissionKey());
            assertNotNull(smb.getNtfsFileAttributes());
            assertNotNull(smb.getFileLastWriteTime());
            assertNotNull(smb.getFileCreationTime());
            assertNotNull(smb.getFileChangeTime());
            assertNotNull(smb.getParentId());
            assertNotNull(smb.getFileId());
        }).verifyComplete();
}
@Test
public void setHttpHeadersFp() {
    // setProperties with an inline file PERMISSION (not a key) returns 200 and a fully
    // populated SMB property set.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
            FILE_PERMISSION))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 200);
            FileSmbProperties smb = response.getValue().getSmbProperties();
            assertNotNull(smb);
            assertNotNull(smb.getFilePermissionKey());
            assertNotNull(smb.getNtfsFileAttributes());
            assertNotNull(smb.getFileLastWriteTime());
            assertNotNull(smb.getFileCreationTime());
            assertNotNull(smb.getFileChangeTime());
            assertNotNull(smb.getParentId());
            assertNotNull(smb.getFileId());
        }).verifyComplete();
}
@Test
public void setHttpHeadersLease() {
    // setProperties on a leased file succeeds with the matching lease id.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null, conditions))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void setHttpHeadersLeaseFail() {
    // setProperties on a leased file with a non-matching lease id is rejected.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null, wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void setHttpHeadersError() {
    // A negative size is rejected with 400 OUT_OF_RANGE_INPUT.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    StepVerifier.create(primaryFileAsyncClient.setProperties(-1, null, null, null))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void setMetadata() {
    // Metadata set at create time is visible, and setMetadata replaces it wholesale.
    primaryFileAsyncClient.createWithResponse(1024, httpHeaders, null, null, testMetadata).block();
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .assertNext(props -> assertEquals(testMetadata, props.getMetadata()))
        .verifyComplete();
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(updatedMetadata))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 200))
        .verifyComplete();
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .assertNext(props -> assertEquals(updatedMetadata, props.getMetadata()))
        .verifyComplete();
}
@Test
public void setMetadataError() {
    // An empty metadata key is rejected with 400 EMPTY_METADATA_KEY.
    primaryFileAsyncClient.create(1024).block();
    Map<String, String> badMetadata = Collections.singletonMap("", "value");
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(badMetadata))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void setMetadataLease() {
    // setMetadata on a leased file succeeds with the matching lease id.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(
            Collections.singletonMap("key", "value"), conditions))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void setMetadataLeaseFail() {
    // setMetadata on a leased file with a non-matching lease id is rejected.
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(
            Collections.singletonMap("key", "value"), wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void listRanges() throws IOException {
    // After uploading a full 1024-byte file, listRanges reports the single range [0, 1023].
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    StepVerifier.create(primaryFileAsyncClient.listRanges())
        .assertNext(range -> {
            assertEquals(0, range.getStart());
            assertEquals(1023, range.getEnd());
        })
        .verifyComplete();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesWithRange() throws IOException {
    // Restricting listRanges to [0, 511] reports exactly that sub-range.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    StepVerifier.create(primaryFileAsyncClient.listRanges(new ShareFileRange(0, 511L)))
        .assertNext(range -> {
            assertEquals(0, range.getStart());
            assertEquals(511, range.getEnd());
        })
        .verifyComplete();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesLease() throws IOException {
    // listRanges on a leased file succeeds with the matching lease id.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.listRanges(null, conditions))
        .expectNextCount(1)
        .verifyComplete();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesLeaseFail() throws IOException {
    // listRanges on a leased file with a non-matching lease id is rejected.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.listRanges(null, wrongLease))
        .verifyError(ShareStorageException.class);
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
// Verifies listRangesDiff against a share snapshot: after taking a snapshot of a fully-written
// 4 MB file, the supplied ranges are re-uploaded and cleared, and the diff must report exactly
// the expected updated ranges and clear ranges, in order.
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
// Baseline: create and fill the file, then snapshot the share so there is a point to diff from.
String snapshotId = primaryFileAsyncClient.create(4 * Constants.MB)
.then(primaryFileAsyncClient.upload(Flux.just(FileShareTestHelper.getRandomByteBuffer(4 * Constants.MB)),
4 * Constants.MB))
.then(primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName())
.createSnapshot()
.map(ShareSnapshotInfo::getSnapshot))
.block();
// Mutate the live file AFTER the snapshot: rewrite the update ranges with fresh bytes...
Flux.fromIterable(rangesToUpdate)
.flatMap(it -> {
int size = (int) (it.getEnd() - it.getStart() + 1);
return primaryFileAsyncClient.uploadWithResponse(Flux.just(
FileShareTestHelper.getRandomByteBuffer(size)), size, it.getStart());
}).blockLast();
// ...and clear the clear ranges.
Flux.fromIterable(rangesToClear)
.flatMap(it -> primaryFileAsyncClient.clearRangeWithResponse(it.getEnd() - it.getStart() + 1,
it.getStart()))
.blockLast();
// The diff against the snapshot must report exactly the expected updated and cleared ranges.
StepVerifier.create(primaryFileAsyncClient.listRangesDiff(snapshotId)).assertNext(it -> {
assertEquals(it.getRanges().size(), expectedRanges.size());
assertEquals(it.getClearRanges().size(), expectedClearRanges.size());
for (int i = 0; i < expectedRanges.size(); i++) {
FileRange actualRange = it.getRanges().get(i);
FileRange expectedRange = expectedRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
for (int i = 0; i < expectedClearRanges.size(); i++) {
ClearRange actualRange = it.getClearRanges().get(i);
ClearRange expectedRange = expectedClearRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
}).verifyComplete();
}
@Test
public void listHandles() {
    // A freshly-created file has no open handles, so the listing is empty.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.listHandles()).verifyComplete();
}
@Test
public void listHandlesWithMaxResult() {
    // Paging with maxResultsPerPage still yields an empty listing for a handle-free file.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.listHandles(2)).verifyComplete();
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseHandleMin() {
    // Force-closing a handle id that is not open is a no-op: zero closed, zero failed.
    primaryFileAsyncClient.create(512).block();
    StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("1")).assertNext(it -> {
        // FIX: JUnit's assertEquals takes (expected, actual); the original had the arguments
        // swapped, which produces a misleading failure message.
        assertEquals(0, it.getClosedHandles());
        assertEquals(0, it.getFailedHandles());
    }).verifyComplete();
}
@Test
public void forceCloseHandleInvalidHandleID() {
    // A malformed handle id is rejected by the service with a ShareStorageException.
    primaryFileAsyncClient.create(512).block();
    StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("invalidHandleId"))
        .verifyErrorSatisfies(error -> assertInstanceOf(ShareStorageException.class, error));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseAllHandlesMin() {
    // With no handles open, force-closing all handles reports zero closed and zero failed.
    primaryFileAsyncClient.create(512).block();
    StepVerifier.create(primaryFileAsyncClient.forceCloseAllHandles())
        .assertNext(it -> {
            // FIX: JUnit's assertEquals takes (expected, actual); the original had the
            // arguments swapped, which produces a misleading failure message.
            assertEquals(0, it.getClosedHandles());
            assertEquals(0, it.getFailedHandles());
        }).verifyComplete();
}
@Test
public void getSnapshotId() {
    // A client built with a snapshot id exposes that id via getShareSnapshotId().
    String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
    ShareFileAsyncClient snapshotClient = fileBuilderHelper(shareName, filePath)
        .snapshot(snapshot)
        .buildFileAsyncClient();
    assertEquals(snapshot, snapshotClient.getShareSnapshotId());
}
@Test
public void getShareName() {
    // The client reports the share name it was built with.
    assertEquals(shareName, primaryFileAsyncClient.getShareName());
}
@Test
public void getFilePath() {
    // The client reports the file path it was built with.
    assertEquals(filePath, primaryFileAsyncClient.getFilePath());
}
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void listHandlesClientName() {
    // Listed handles on a pre-provisioned file expose a non-null client name.
    // NOTE(review): relies on a pre-existing share "testing" with dir1/test.txt and an open
    // handle — confirm the fixture exists in the target environment.
    ShareAsyncClient client = primaryFileServiceAsyncClient.getShareAsyncClient("testing");
    ShareDirectoryAsyncClient directoryClient = client.getDirectoryClient("dir1");
    ShareFileAsyncClient fileClient = directoryClient.getFileClient("test.txt");
    List<HandleItem> list = fileClient.listHandles().collectList().block();
    // FIX: guard against a null/empty listing before indexing — the original dereferenced
    // list.get(0) directly, turning a missing fixture into an opaque NPE/IndexOutOfBounds.
    assertNotNull(list);
    assertFalse(list.isEmpty());
    assertNotNull(list.get(0).getClientName());
}
} |
I checked the .NET tests for this, the behavior you are describing is correct. | public void uploadRangeFromURLOAuth() {
ShareServiceAsyncClient oAuthServiceClient = getOAuthServiceClientAsyncSharedKey(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryAsyncClient dirClient = oAuthServiceClient.getShareAsyncClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create().block();
String fileName = generatePathName();
ShareFileAsyncClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
int sourceOffset = 5;
int length = 5;
int destinationOffset = 0;
fileClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(fileClient.getShareName())
.setFilePath(fileClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
String fileNameDest = generatePathName();
ShareFileAsyncClient fileClientDest = dirClient.getFileClient(fileNameDest);
fileClientDest.create(1024).block();
StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length,
destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken))
.assertNext(r -> assertEquals(r.getStatusCode(), 201))
.verifyComplete();
StepVerifier.create(fileClientDest.downloadWithResponse(null)
.flatMap(r -> {
assertTrue(r.getStatusCode() == 200 || r.getStatusCode() == 206);
ShareFileDownloadHeaders headers = r.getDeserializedHeaders();
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> {
assertEquals(bytes[0], 117);
})
.verifyComplete();
}

// (extraction artifact: a duplicated fragment — "StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length," — was fused into this line)
public void uploadRangeFromURLOAuth() {
ShareServiceAsyncClient oAuthServiceClient = getOAuthServiceClientAsyncSharedKey(new ShareServiceClientBuilder()
.shareTokenIntent(ShareTokenIntent.BACKUP));
ShareDirectoryAsyncClient dirClient = oAuthServiceClient.getShareAsyncClient(shareName)
.getDirectoryClient(generatePathName());
dirClient.create().block();
String fileName = generatePathName();
ShareFileAsyncClient fileClient = dirClient.getFileClient(fileName);
fileClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
int sourceOffset = 5;
int length = 5;
int destinationOffset = 0;
fileClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(fileClient.getShareName())
.setFilePath(fileClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
String fileNameDest = generatePathName();
ShareFileAsyncClient fileClientDest = dirClient.getFileClient(fileNameDest);
fileClientDest.create(1024).block();
StepVerifier.create(fileClientDest.uploadRangeFromUrlWithResponse(length,
destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" + sasToken))
.assertNext(r -> assertEquals(r.getStatusCode(), 201))
.verifyComplete();
StepVerifier.create(fileClientDest.downloadWithResponse(null)
.flatMap(r -> {
assertTrue(r.getStatusCode() == 200 || r.getStatusCode() == 206);
ShareFileDownloadHeaders headers = r.getDeserializedHeaders();
assertNotNull(headers.getETag());
assertNotNull(headers.getLastModified());
assertNotNull(headers.getFilePermissionKey());
assertNotNull(headers.getFileAttributes());
assertNotNull(headers.getFileLastWriteTime());
assertNotNull(headers.getFileCreationTime());
assertNotNull(headers.getFileChangeTime());
assertNotNull(headers.getFileParentId());
assertNotNull(headers.getFileId());
return FluxUtil.collectBytesInByteBufferStream(r.getValue());
}))
.assertNext(bytes -> {
assertEquals(bytes[0], 117);
})
.verifyComplete();
}

class FileAsyncApiTests extends FileShareTestBase {
// Async client under test; recreated for every test in setup().
private ShareFileAsyncClient primaryFileAsyncClient;
// Sync share client used for share-level setup (creating the share and permissions).
private ShareClient shareClient;
private String shareName;
private String filePath;
// Shared fixtures reassigned per test in setup(); static, so shared across instances.
private static Map<String, String> testMetadata;
private static ShareFileHttpHeaders httpHeaders;
private FileSmbProperties smbProperties;
// SDDL permission string reused by the file-permission tests.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
    // Fresh share and file path per test; the share is created eagerly so each
    // test only needs to create the file itself.
    shareName = generateShareName();
    filePath = generatePathName();
    shareClient = shareBuilderHelper(shareName).buildClient();
    shareClient.create();
    primaryFileAsyncClient = fileBuilderHelper(shareName, filePath).buildFileAsyncClient();
    // Fixtures reused by the metadata / header / SMB-property tests.
    testMetadata = Collections.singletonMap("testmetadata", "value");
    httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en")
        .setContentType("application/octet-stream");
    smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL));
}
@Test
public void getFileURL() {
    String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
        .getConnectionString()).getAccountName();
    // NOTE(review): the format string below is truncated in this copy of the file —
    // presumably it built "https://<account>.file.core.windows.net/<share>/<path>";
    // restore the full literal before compiling.
    String expectURL = String.format("https:
    String fileURL = primaryFileAsyncClient.getFileUrl();
    assertEquals(expectURL, fileURL);
}
@Test
public void createFile() {
    // Creating a 1 KB file should succeed with HTTP 201.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, null, null, null, null))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 201))
        .verifyComplete();
}
@Test
public void createFileError() {
    // A negative file size must be rejected with 400 OutOfRangeInput.
    StepVerifier.create(primaryFileAsyncClient.create(-1))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void createFileWithArgsFpk() {
    // Register the SDDL permission on the share first so the file can reference it
    // by key ("fpk" = file permission key) instead of the raw permission string.
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    // Expect 201 Created with fully populated SMB properties in the response value.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, null,
        testMetadata)).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 201);
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
@Test
public void createFileWithArgsFp() {
    // Same as the fpk variant but passes the raw SDDL permission string ("fp")
    // directly instead of a pre-created permission key.
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION,
        testMetadata)).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 201);
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
@Test
public void createFileWithArgsError() {
    // Even with metadata supplied, a negative size must fail with 400 OutOfRangeInput.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(-1, null, null, null, testMetadata))
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
                ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void createLease() {
    // Re-creating a leased file must succeed when the correct lease id is supplied.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the StepVerifier had no terminal verify step, so the pipeline was
    // never subscribed and the test asserted nothing. verifyComplete() runs it.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
            null, null, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void createLeaseFail() {
    // Acquire a real lease, then attempt the create with a different (random) lease id.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
            null, null, wrongLease))
        .verifyError(ShareStorageException.class);
}
/*
 * Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
 */
/*
 * Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
 */
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@ValueSource(ints = {
    0,
    20,
    16 * 1024 * 1024,
    8 * 1026 * 1024 + 10,
    50 * Constants.MB
})
public void downloadFileBufferCopy(int fileSize) throws IOException {
    // Round-trips files of several sizes (empty through 50 MB) through
    // uploadFromFile/downloadToFile and compares the bytes on disk.
    // NOTE(review): the @EnabledIf condition string above is truncated in this copy of the file.
    ShareServiceAsyncClient shareServiceAsyncClient = new ShareServiceClientBuilder()
        .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
        .buildAsyncClient();
    ShareFileAsyncClient fileClient = shareServiceAsyncClient.getShareAsyncClient(shareName)
        .createFile(filePath, fileSize).block();
    File file = FileShareTestHelper.getRandomFile(fileSize);
    assertNotNull(fileClient);
    fileClient.uploadFromFile(file.toPath().toString()).block();
    File outFile = new File(generatePathName() + ".txt");
    // Ensure a stale output file from a previous run does not break downloadToFile.
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    fileClient.downloadToFile(outFile.toPath().toString()).block();
    assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
    shareServiceAsyncClient.deleteShare(shareName).block();
    outFile.delete();
    file.delete();
}
@Test
public void uploadAndDownloadData() {
    // Uploads the default payload and verifies both the download headers and the bytes.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // BUG FIX: the byte comparison was previously built inside assertNext but never
    // subscribed, so the content assertion never executed. Collecting the bytes as
    // part of the verified pipeline makes the comparison actually run.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null)
        .flatMap(response -> {
            assertTrue((response.getStatusCode() == 200) || (response.getStatusCode() == 206));
            ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
            assertEquals(DATA.getDefaultDataSizeLong(), headers.getContentLength());
            assertNotNull(headers.getETag());
            assertNotNull(headers.getLastModified());
            assertNotNull(headers.getFilePermissionKey());
            assertNotNull(headers.getFileAttributes());
            assertNotNull(headers.getFileLastWriteTime());
            assertNotNull(headers.getFileCreationTime());
            assertNotNull(headers.getFileChangeTime());
            assertNotNull(headers.getFileParentId());
            assertNotNull(headers.getFileId());
            return FluxUtil.collectBytesInByteBufferStream(response.getValue());
        }))
        .assertNext(actualData -> assertArrayEquals(DATA.getDefaultBytes(), actualData))
        .verifyComplete();
}
@Test
public void uploadAndDownloadDataWithArgs() {
    // Uploads at offset 1 and downloads the same range back, verifying the bytes.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()).setOffset(1L)))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // BUG FIX: the byte comparison was built inside assertNext but never subscribed,
    // so it never ran. Fold the collection into the verified pipeline instead.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1,
            DATA.getDefaultDataSizeLong()), true)
        .flatMap(it -> {
            FileShareTestHelper.assertResponseStatusCode(it, 206);
            assertEquals(DATA.getDefaultDataSizeLong(), it.getDeserializedHeaders().getContentLength());
            return FluxUtil.collectBytesInByteBufferStream(it.getValue());
        }))
        .assertNext(actualData -> assertArrayEquals(DATA.getDefaultBytes(), actualData))
        .verifyComplete();
}
@Test
public void uploadDataError() {
    // Uploading to a file that was never created must fail with 404 ResourceNotFound.
    StepVerifier.create(primaryFileAsyncClient.upload(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()))
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404,
                ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void uploadLease() {
    // Uploading to a leased file must succeed when the matching lease id is supplied.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: without a terminal verify step the StepVerifier never subscribes, so
    // the upload was never executed or checked. verifyComplete() actually runs it.
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
            DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
            .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId))))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void uploadLeaseFail() {
    // Uploading with a lease id that does not match the active lease must fail.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
            .setRequestConditions(new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid()))))
        .verifyError(ShareStorageException.class);
}
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(long size, String errMsg) {
    // Declaring a length that differs from the actual flux content (both shorter
    // and longer) must surface an UnexpectedLengthException client-side.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), size).setOffset(0L))).verifyErrorSatisfies(it -> {
        assertInstanceOf(UnexpectedLengthException.class, it);
        assertTrue(it.getMessage().contains(errMsg));
    });
}
/**
 * Arguments for {@code uploadDataLengthMismatch}: a declared length and the
 * substring expected in the resulting error message.
 */
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
    Arguments declaredTooSmall = Arguments.of(6, "more than");
    Arguments declaredTooLarge = Arguments.of(8, "less than");
    return Stream.of(declaredTooSmall, declaredTooLarge);
}
@Test
public void downloadDataError() {
    // Downloading from a file that was never created must fail with 404 ResourceNotFound.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 1023L), false))
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404,
                ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void downloadLease() {
    // Downloading a leased file must succeed when the matching lease id is supplied.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the StepVerifier had no terminal verify step, so the download was
    // never subscribed and nothing was asserted. verifyComplete() runs it.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
            .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void downloadLeaseFail() {
    // Downloading with a lease id that does not match the active lease must fail.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
        .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
@Test
public void uploadAndClearRange() {
    // Clears the first 7 bytes of an uploaded file and verifies they read back as zero.
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 0)).assertNext(it ->
        FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    // BUG FIX: the zero-byte verification was built inside assertNext but never
    // subscribed, so it never ran. Collect the bytes in the verified pipeline.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 6L), false)
        .flatMap(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue())))
        .assertNext(data -> {
            for (byte b : data) {
                assertEquals(0, b);
            }
        })
        .verifyComplete();
}
@Test
public void uploadAndClearRangeWithArgs() {
    // Clears 7 bytes starting at offset 1 and verifies the range reads back as zero.
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 1)).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 201);
    }).verifyComplete();
    // BUG FIX: the zero-byte verification was built inside assertNext but never
    // subscribed, so it never ran. Collect the bytes in the verified pipeline.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1, 7L), false)
        .flatMap(it -> FluxUtil.collectBytesInByteBufferStream(it.getValue())))
        .assertNext(data -> {
            for (byte b : data) {
                assertEquals(0, b);
            }
        })
        .verifyComplete();
    fullInfoData.clear();
}
@Test
public void clearRangeError() {
    // Clearing past the end of the file must fail with 416 InvalidRange.
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRange(30))
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 416, ShareErrorCode.INVALID_RANGE));
}
@Test
public void clearRangeErrorArgs() {
    // Clearing a range whose offset is beyond the file length must fail with 416 InvalidRange.
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 20)).verifyErrorSatisfies(it ->
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 416, ShareErrorCode.INVALID_RANGE));
    fullInfoData.clear();
}
@Test
public void clearRangeLease() {
    // Clearing a range on a leased file must succeed with the matching lease id.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: the StepVerifier had no terminal verify step, so the clear was never
    // subscribed and nothing was asserted. verifyComplete() runs it.
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
            .setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void clearRangeLeaseFail() {
    // Clearing a range with a non-matching lease id must fail.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
        .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
@Test
public void uploadFileDoesNotExist() {
    // Uploading from a local path that does not exist must surface NoSuchFileException
    // as the cause of the reactive error.
    File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
    if (uploadFile.exists()) {
        assertTrue(uploadFile.delete());
    }
    StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile.getPath()))
        .verifyErrorSatisfies(error -> assertInstanceOf(NoSuchFileException.class, error.getCause()));
    uploadFile.delete();
}
@Test
public void uploadAndDownloadFileExists() throws IOException {
    // downloadToFile must refuse to overwrite an existing local file, surfacing
    // FileAlreadyExistsException as the cause of the reactive error.
    String data = "Download file exists";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (!downloadFile.exists()) {
        assertTrue(downloadFile.createNewFile());
    }
    primaryFileAsyncClient.create(data.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
        data.length()).block();
    StepVerifier.create(
        primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).verifyErrorSatisfies(it ->
        assertInstanceOf(FileAlreadyExistsException.class, it.getCause()));
    downloadFile.delete();
}
@Test
public void uploadAndDownloadToFileDoesNotExist() throws FileNotFoundException {
    // Round-trips a small string through uploadRange/downloadToFile when the local
    // target does not exist yet, then verifies length and content on disk.
    String data = "Download file does not exist";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (downloadFile.exists()) {
        assertTrue(downloadFile.delete());
    }
    primaryFileAsyncClient.create(data.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
        data.length()).block();
    StepVerifier.create(primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).assertNext(it ->
        assertEquals(it.getContentLength(), data.length())).verifyComplete();
    // FIX: try-with-resources guarantees the Scanner is closed even when the
    // content assertion fails (it was previously leaked on failure).
    try (Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z")) {
        assertEquals(data, scanner.next());
    }
    downloadFile.delete();
}
@Test
public void uploadFromFileLease() throws IOException {
    // uploadFromFile must succeed against a leased file when the matching lease id is given.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    StepVerifier.create(primaryFileAsyncClient.uploadFromFile(
        uploadFile, new ShareRequestConditions().setLeaseId(leaseId))).verifyComplete();
    // Clean up the temporary local file regardless of platform temp-dir behavior.
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void uploadFromFileLeaseFail() throws IOException {
    // uploadFromFile with a non-matching lease id must fail with ShareStorageException.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile, new ShareRequestConditions()
        .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void downloadToFileLease() {
    // downloadToFile must succeed against a leased file when the matching lease id is given.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
        DATA.getDefaultDataSizeLong())).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
        downloadFile.toPath().toString(), null, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1).verifyComplete();
    downloadFile.delete();
}
@Test
public void downloadToFileLeaseFail() {
    // downloadToFile with a non-matching lease id must fail with ShareStorageException.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
        DATA.getDefaultDataSizeLong())).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    StepVerifier.create(primaryFileAsyncClient.downloadToFileWithResponse(
        downloadFile.toPath().toString(), null, new ShareRequestConditions()
            .setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
    downloadFile.delete();
}
@Disabled("Groovy version of this test was not asserting contents of result properly. Need to revisit this test.")
@Test
public void uploadRangeFromURL() {
    // Copies 5 bytes from offset 5 of the source file to offset 0 of a destination
    // file via a read-only SAS URL, then compares the copied characters.
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.upload(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    // Read-only SAS scoped to the source file authorizes the server-side copy.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileAsyncClient.getFileUrl()
        + "?" + sasToken).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(client.download())).assertNext(it -> {
        String result = new String(it);
        for (int i = 0; i < length; i++) {
            assertEquals(result.charAt((int) (destinationOffset + i)), data.charAt((int) (sourceOffset + i)));
        }
    }).verifyComplete();
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void uploadRangeFromURLLease() {
    // uploadRangeFromUrl against a leased destination must succeed with the matching lease id.
    // FIX: a duplicated @Test annotation was removed (Java forbids repeating a
    // non-@Repeatable annotation, so the duplicate would not compile).
    // NOTE(review): the @DisabledIf condition string above is truncated in this copy of the file.
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    // Read-only SAS scoped to the source file authorizes the server-side copy.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    String leaseId = createLeaseClient(client).acquireLease().block();
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1).verifyComplete();
}
@Test
public void uploadRangeFromURLLeaseFail() {
    // uploadRangeFromUrl against a leased destination must fail with a non-matching lease id.
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    // Read-only SAS scoped to the source file authorizes the server-side copy.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    createLeaseClient(client).acquireLease().block();
    // FIX: removed a redundant .toString() call — getFileUrl() already returns a String.
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken,
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
@Test
public void startCopy() {
    // Begin a copy of the file onto itself; a populated copy id in the first poll
    // response indicates the copy operation started.
    primaryFileAsyncClient.create(1024).block();
    String sourceUrl = primaryFileAsyncClient.getFileUrl();
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceUrl,
        new ShareFileCopyOptions(), getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
    boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
    // Exercises beginCopy across combinations of permission key vs raw permission,
    // read-only handling and archive-attribute handling.
    // NOTE(review): the @MethodSource reference above is truncated in this copy of the file.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    if (setFilePermissionKey) {
        smbProperties.setFilePermissionKey(filePermissionKey);
    }
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, smbProperties,
        setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null,
        getPollingDuration(1000), null);
    StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
    + "between the time subscribed and the time we start observing events.")
@Test
public void startCopyError() {
    // NOTE(review): despite the name this test currently asserts a successful copy
    // (identical to startCopy) and never triggers an error path — revisit when re-enabling.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL,
        new ShareFileCopyOptions(), getPollingDuration(1000));
    StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
    + "between the time subscribed and the time we start observing events.")
@Test
public void startCopyLease() {
    // beginCopy onto a leased destination must succeed with the matching lease id.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
        false, false, null, getPollingDuration(1000), new ShareRequestConditions().setLeaseId(leaseId));
    StepVerifier.create(poller).assertNext(it -> {
        assertNotNull(it.getValue().getCopyId());
    }).expectComplete().verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
    + "between the time subscribed and the time we start observing events.")
@Test
public void startCopyLeaseFail() {
    // NOTE(review): despite the "Fail" name, this test asserts a successful copy even
    // though a random (non-matching) lease id is supplied — verify intent when re-enabling.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, null, null, null,
        false, false, null, getPollingDuration(1000), new ShareRequestConditions()
            .setLeaseId(testResourceNamer.randomUuid()));
    StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
        .verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
    boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
    // Same matrix as startCopyWithArgs but driven through the ShareFileCopyOptions
    // builder overload instead of the long positional-parameter overload.
    // NOTE(review): the @MethodSource reference above is truncated in this copy of the file.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    if (setFilePermissionKey) {
        smbProperties.setFilePermissionKey(filePermissionKey);
    }
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(setFilePermission ? FILE_PERMISSION : null)
        .setIgnoreReadOnly(ignoreReadOnly)
        .setArchiveAttribute(setArchiveAttribute)
        .setPermissionCopyModeType(permissionType);
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
        getPollingDuration(1000));
    StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
    // Copy onto itself with ignoreReadOnly and archiveAttribute enabled; a copy id
    // in the first poll response indicates the operation started.
    primaryFileAsyncClient.create(1024).block();
    String sourceUrl = primaryFileAsyncClient.getFileUrl();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setIgnoreReadOnly(true)
        .setArchiveAttribute(true);
    PollerFlux<ShareFileCopyInfo, Void> copyPoller =
        primaryFileAsyncClient.beginCopy(sourceUrl, copyOptions, getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(response -> assertNotNull(response.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
// Copy that OVERRIDEs the destination's SMB properties with explicit values, then verifies the
// destination reflects them.
// NOTE(review): the @DisabledIf condition string below is truncated by extraction — restore the
// full method reference and closing quote/paren from the original file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsFilePermission() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
// compareDatesWithPrecision presumably asserts internally (service rounds timestamps) — TODO confirm.
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
// Copy with an explicit file-change-time in OVERRIDE mode; verifies the destination's change time.
// NOTE(review): the @DisabledIf condition string below is truncated by extraction — restore the
// full method reference and closing quote/paren from the original file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsChangeTime() {
    // Fixed: the ShareFileInfo returned by create() was captured in an unused local ("client").
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    smbProperties.setFileChangeTime(testResourceNamer.now());
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
        getPollingDuration(1000));
    StepVerifier.create(poller).assertNext(it ->
        assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
    // Service may round timestamps; helper compares with reduced precision.
    FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
        Objects.requireNonNull(primaryFileAsyncClient.getProperties().block()).getSmbProperties()
            .getFileChangeTime());
}
// Like startCopyWithOptionsFilePermission, but the permission is supplied via a pre-created
// permission key instead of the raw permission string.
// NOTE(review): the @DisabledIf condition string below is truncated by extraction — restore the
// full method reference and closing quote/paren from the original file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs)
.setFilePermissionKey(filePermissionKey);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
// Copy to a leased destination succeeds when the matching lease id is supplied.
@Test
public void startCopyWithOptionsLease() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
}
// Copy to a leased destination fails when a random (non-matching) lease id is supplied.
@Test
public void startCopyWithOptionsInvalidLease() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = testResourceNamer.randomUuid();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions);
// blockFirst() subscribes so the service error surfaces as ShareStorageException.
assertThrows(ShareStorageException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)).blockFirst());
}
// Copy that sets destination metadata; success is indicated by a non-null copy id.
@Test
public void startCopyWithOptionsMetadata() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setMetadata(testMetadata);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
}
// Copy that asks the service to carry over the SOURCE file's SMB properties (created/written/
// changed times and attributes) via CopyableFileSmbPropertiesList, then verifies they survived.
// NOTE(review): the @DisabledIf condition string below is truncated by extraction — restore the
// full method reference and closing quote/paren from the original file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
primaryFileAsyncClient.create(1024).block();
ShareFileProperties initialProperties = primaryFileAsyncClient.getProperties().block();
assertNotNull(initialProperties);
// Snapshot the source's SMB values before the copy so we can compare afterwards.
OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(true)
.setLastWrittenOn(true)
.setChangedOn(true)
.setFileAttributes(true);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions)
.setSmbPropertiesToCopy(list);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getFileLastWriteTime());
FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getFileChangeTime());
assertEquals(fileAttributes, resultProperties.getNtfsFileAttributes());
}
// Requesting "copy from source" for an SMB property while ALSO supplying an explicit value for it
// is contradictory; the client is expected to reject the combination with IllegalArgumentException.
// NOTE(review): the @MethodSource value below is truncated by extraction — restore the full method
// reference and closing quote/paren from the original file.
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
boolean fileAttributes) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(createdOn)
.setLastWrittenOn(lastWrittenOn)
.setChangedOn(changedOn)
.setFileAttributes(fileAttributes);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFileChangeTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
.setSmbPropertiesToCopy(list);
// NOTE(review): this assumes beginCopy validates eagerly (throws before subscription) — confirm.
assertThrows(IllegalArgumentException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)));
}
// Placeholder: aborting requires catching a copy in the "pending" state, which cannot currently
// be forced against the live service; see the @Disabled reason.
@Disabled("TODO: Need to find a way of mocking pending copy status")
@Test
public void abortCopy() {
}
// Deleting an existing file returns HTTP 202 (Accepted).
@Test
public void deleteFile() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse()).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Deleting a file that was never created fails with 404 RESOURCE_NOT_FOUND.
@Test
public void deleteFileError() {
StepVerifier.create(primaryFileAsyncClient.delete()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
// Delete succeeds on a leased file when the matching lease id is supplied.
@Test
public void deleteFileLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(new ShareRequestConditions().setLeaseId(leaseId)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Delete fails on a leased file when a random (non-matching) lease id is supplied.
@Test
public void deleteFileLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// deleteIfExists on an existing file behaves like delete: HTTP 202.
@Test
public void deleteIfExistsFile() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(null)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// deleteIfExists on a file that never existed: no exception, value=false, status 404, and the
// file still does not exist.
@Test
public void deleteFileThatDoesNotExist() {
    ShareFileAsyncClient client = primaryFileAsyncClient.getFileAsyncClient(generateShareName());
    Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    // Fixed: JUnit's assertEquals takes (expected, actual) — the original had them reversed,
    // which produces a misleading failure message.
    assertEquals(404, response.getStatusCode());
    assertNotEquals(Boolean.TRUE, client.exists().block());
}
// First deleteIfExists returns true (deleted); second returns false (already gone).
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null, null, null).block();
assertEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
assertNotEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
}
// deleteIfExists succeeds on a leased file when the matching lease id is supplied.
@Test
public void deleteIfExistsFileLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(new ShareRequestConditions()
.setLeaseId(leaseId))).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
.verifyComplete();
}
// deleteIfExists fails on a leased file when a random (non-matching) lease id is supplied.
@Test
public void deleteIfExistsFileLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
}
// A freshly created file reports a fully populated property set, including all SMB metadata.
@Test
public void getProperties() {
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 200);
        assertNotNull(it.getValue().getETag());
        // Fixed: this assertion was duplicated in the original; asserting last-modified once.
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
// getProperties succeeds on a leased file with the matching lease id.
@Test
public void getPropertiesLease() {
primaryFileAsyncClient.create(1024).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// getProperties fails on a leased file with a non-matching lease id.
@Test
public void getPropertiesLeaseFail() {
primaryFileAsyncClient.create(1024).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// getProperties on a never-created file surfaces a ShareStorageException.
@Test
public void getPropertiesError() {
StepVerifier.create(primaryFileAsyncClient.getProperties())
.verifyErrorSatisfies(it -> assertInstanceOf(ShareStorageException.class, it));
}
// setProperties using a pre-created file-permission KEY; the response echoes populated SMB data.
@Test
public void setHttpHeadersFpk() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null))
.assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
// setProperties using the raw file-permission STRING; the response echoes populated SMB data.
@Test
public void setHttpHeadersFp() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
FILE_PERMISSION)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
// setProperties succeeds on a leased file with the matching lease id.
@Test
public void setHttpHeadersLease() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// setProperties fails on a leased file with a non-matching lease id.
@Test
public void setHttpHeadersLeaseFail() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// A negative size is rejected by the service with 400 OUT_OF_RANGE_INPUT.
@Test
public void setHttpHeadersError() {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
StepVerifier.create(primaryFileAsyncClient.setProperties(-1, null, null, null)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// setMetadata REPLACES the metadata set at creation time; verify before and after.
@Test
public void setMetadata() {
primaryFileAsyncClient.createWithResponse(1024, httpHeaders, null, null, testMetadata).block();
Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
StepVerifier.create(primaryFileAsyncClient.getProperties()).assertNext(it ->
assertEquals(testMetadata, it.getMetadata())).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(updatedMetadata))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryFileAsyncClient.getProperties()).assertNext(it ->
assertEquals(updatedMetadata, it.getMetadata())).verifyComplete();
}
// An empty metadata key is rejected with 400 EMPTY_METADATA_KEY.
@Test
public void setMetadataError() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> errorMetadata = Collections.singletonMap("", "value");
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(errorMetadata))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
// setMetadata succeeds on a leased file with the matching lease id.
@Test
public void setMetadataLease() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> metadata = Collections.singletonMap("key", "value");
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// setMetadata fails on a leased file with a non-matching lease id.
@Test
public void setMetadataLeaseFail() {
primaryFileAsyncClient.create(1024).block();
Map<String, String> metadata = Collections.singletonMap("key", "value");
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// After a 1024-byte upload the file has exactly one valid range covering bytes 0..1023.
@Test
public void listRanges() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
StepVerifier.create(primaryFileAsyncClient.listRanges()).assertNext(it -> {
assertEquals(0, it.getStart());
assertEquals(1023, it.getEnd());
}).verifyComplete();
// Clean up the local temp upload file.
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Listing ranges restricted to bytes 0..511 clips the reported range accordingly.
@Test
public void listRangesWithRange() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
StepVerifier.create(primaryFileAsyncClient.listRanges(new ShareFileRange(0, 511L)))
.assertNext(it -> {
assertEquals(0, it.getStart());
assertEquals(511, it.getEnd());
}).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// listRanges succeeds on a leased file with the matching lease id.
@Test
public void listRangesLease() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.listRanges(null, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// listRanges fails on a leased file with a non-matching lease id.
@Test
public void listRangesLeaseFail() throws IOException {
primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
String fileName = generatePathName();
String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
primaryFileAsyncClient.uploadFromFile(uploadFile).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.listRanges(null, new ShareRequestConditions()
.setLeaseId(testResourceNamer.randomUuid()))).verifyError(ShareStorageException.class);
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// Range-diff test: snapshot a 4MB file, apply parameterized updates and clears, then verify
// listRangesDiff(snapshot) reports exactly the expected updated ranges and clear ranges.
// NOTE(review): the @DisabledIf and @MethodSource strings below are truncated by extraction —
// restore the full method references and closing quotes/parens from the original file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
// Create + fill the file, then take a share snapshot as the diff baseline.
String snapshotId = primaryFileAsyncClient.create(4 * Constants.MB)
.then(primaryFileAsyncClient.upload(Flux.just(FileShareTestHelper.getRandomByteBuffer(4 * Constants.MB)),
4 * Constants.MB))
.then(primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName())
.createSnapshot()
.map(ShareSnapshotInfo::getSnapshot))
.block();
// Overwrite each requested range with fresh random bytes.
Flux.fromIterable(rangesToUpdate)
.flatMap(it -> {
int size = (int) (it.getEnd() - it.getStart() + 1);
return primaryFileAsyncClient.uploadWithResponse(Flux.just(
FileShareTestHelper.getRandomByteBuffer(size)), size, it.getStart());
}).blockLast();
// Then zero out each requested clear range.
Flux.fromIterable(rangesToClear)
.flatMap(it -> primaryFileAsyncClient.clearRangeWithResponse(it.getEnd() - it.getStart() + 1,
it.getStart()))
.blockLast();
StepVerifier.create(primaryFileAsyncClient.listRangesDiff(snapshotId)).assertNext(it -> {
assertEquals(it.getRanges().size(), expectedRanges.size());
assertEquals(it.getClearRanges().size(), expectedClearRanges.size());
for (int i = 0; i < expectedRanges.size(); i++) {
FileRange actualRange = it.getRanges().get(i);
FileRange expectedRange = expectedRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
for (int i = 0; i < expectedClearRanges.size(); i++) {
ClearRange actualRange = it.getClearRanges().get(i);
ClearRange expectedRange = expectedClearRanges.get(i);
assertEquals(expectedRange.getStart(), actualRange.getStart());
assertEquals(expectedRange.getEnd(), actualRange.getEnd());
}
}).verifyComplete();
}
// A freshly created file has no open handles, so the listing completes empty.
@Test
public void listHandles() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.listHandles()).verifyComplete();
}
// Same as listHandles, exercising the maxResultsPerPage overload.
@Test
public void listHandlesWithMaxResult() {
primaryFileAsyncClient.create(1024).block();
StepVerifier.create(primaryFileAsyncClient.listHandles(2)).verifyComplete();
}
// Force-closing handle id "1" when no handle is open is a no-op: zero closed, zero failed.
// NOTE(review): the @DisabledIf condition string below is truncated by extraction — restore the
// full method reference and closing quote/paren from the original file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseHandleMin() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("1")).assertNext(it -> {
assertEquals(it.getClosedHandles(), 0);
assertEquals(it.getFailedHandles(), 0);
}).verifyComplete();
}
// A syntactically invalid handle id is rejected by the service.
@Test
public void forceCloseHandleInvalidHandleID() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("invalidHandleId"))
.verifyErrorSatisfies(it -> assertInstanceOf(ShareStorageException.class, it));
}
// forceCloseAllHandles with no open handles reports zero closed and zero failed.
// NOTE(review): the @DisabledIf condition string below is truncated by extraction — restore the
// full method reference and closing quote/paren from the original file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseAllHandlesMin() {
primaryFileAsyncClient.create(512).block();
StepVerifier.create(primaryFileAsyncClient.forceCloseAllHandles())
.assertNext(it -> {
assertEquals(it.getClosedHandles(), 0);
assertEquals(it.getFailedHandles(), 0);
}).verifyComplete();
}
// A client built with an explicit snapshot timestamp reports that value via getShareSnapshotId().
@Test
public void getSnapshotId() {
String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
ShareFileAsyncClient shareSnapshotClient = fileBuilderHelper(shareName, filePath).snapshot(snapshot)
.buildFileAsyncClient();
assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
}
// The client echoes the share name it was built with.
@Test
public void getShareName() {
assertEquals(shareName, primaryFileAsyncClient.getShareName());
}
// The client echoes the file path it was built with.
@Test
public void getFilePath() {
assertEquals(filePath, primaryFileAsyncClient.getFilePath());
}
} | class FileAsyncApiTests extends FileShareTestBase {
// Async client under test; rebuilt for every test in setup().
private ShareFileAsyncClient primaryFileAsyncClient;
// Sync share client used for share creation and permission setup.
private ShareClient shareClient;
private String shareName;
private String filePath;
// NOTE(review): these two are static but reassigned per-test in setup() — consider instance fields.
private static Map<String, String> testMetadata;
private static ShareFileHttpHeaders httpHeaders;
private FileSmbProperties smbProperties;
// SDDL permission string used throughout the permission-related tests.
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
// Per-test fixture: a fresh share is created, but the test FILE is not — tests that need the
// file call create(...) themselves.
@BeforeEach
public void setup() {
shareName = generateShareName();
filePath = generatePathName();
shareClient = shareBuilderHelper(shareName).buildClient();
shareClient.create();
primaryFileAsyncClient = fileBuilderHelper(shareName, filePath).buildFileAsyncClient();
testMetadata = Collections.singletonMap("testmetadata", "value");
httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en")
.setContentType("application/octet-stream");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL));
}
// The client's URL matches the one composed from the account name, share name and file path.
// NOTE(review): the String.format pattern below is truncated by extraction (the URL template and
// its arguments are missing) — restore it from the original file.
@Test
public void getFileURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
String fileURL = primaryFileAsyncClient.getFileUrl();
assertEquals(expectURL, fileURL);
}
// Creating a file returns HTTP 201 (Created).
@Test
public void createFile() {
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, null, null, null, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// A negative max size is rejected with 400 OUT_OF_RANGE_INPUT.
@Test
public void createFileError() {
StepVerifier.create(primaryFileAsyncClient.create(-1)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// Create with headers, metadata and a pre-created file-permission KEY; response carries SMB data.
@Test
public void createFileWithArgsFpk() {
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(filePermissionKey);
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, null,
testMetadata)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
assertNotNull(it.getValue().getLastModified());
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
// Create with headers, metadata and the raw file-permission STRING; response carries SMB data.
@Test
public void createFileWithArgsFp() {
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION,
testMetadata)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
assertNotNull(it.getValue().getLastModified());
assertNotNull(it.getValue().getSmbProperties());
assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
assertNotNull(it.getValue().getSmbProperties().getParentId());
assertNotNull(it.getValue().getSmbProperties().getFileId());
}).verifyComplete();
}
// Negative size with the args overload is also rejected with 400 OUT_OF_RANGE_INPUT.
@Test
public void createFileWithArgsError() {
StepVerifier.create(primaryFileAsyncClient.createWithResponse(-1, null, null, null, testMetadata))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.OUT_OF_RANGE_INPUT));
}
// Re-creating (resizing) a leased file succeeds when the matching lease id is supplied.
@Test
public void createLease() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // Fixed: the StepVerifier chain was never subscribed (no terminal verify step), so the
    // original test did not actually execute or assert anything.
    StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
        null, null, new ShareRequestConditions().setLeaseId(leaseId)))
        .expectNextCount(1)
        .verifyComplete();
}
// Re-creating a leased file fails when a random (non-matching) lease id is supplied.
@Test
public void createLeaseFail() {
primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
createLeaseClient(primaryFileAsyncClient).acquireLease().block();
StepVerifier.create(primaryFileAsyncClient.createWithResponse(DATA.getDefaultDataSizeLong() + 1, null, null,
null, null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
/*
 * Tests downloading a file using a default client that doesn't have an HttpClient passed to it.
 */
/*
* Tests downloading a file using a default client that doesn't have a HttpClient passed to it.
*/
// Round-trip: upload a random local file of the given size, download it back, and compare bytes.
// NOTE(review): the @EnabledIf condition string below is truncated by extraction — restore the
// full method reference and closing quote/paren from the original file.
// NOTE(review): the local temp files and the share are only cleaned up on the success path; a
// failing assertion leaks them — consider try/finally.
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@ValueSource(ints = {
0,
20,
16 * 1024 * 1024,
8 * 1026 * 1024 + 10,
50 * Constants.MB
})
public void downloadFileBufferCopy(int fileSize) throws IOException {
ShareServiceAsyncClient shareServiceAsyncClient = new ShareServiceClientBuilder()
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.buildAsyncClient();
ShareFileAsyncClient fileClient = shareServiceAsyncClient.getShareAsyncClient(shareName)
.createFile(filePath, fileSize).block();
File file = FileShareTestHelper.getRandomFile(fileSize);
assertNotNull(fileClient);
fileClient.uploadFromFile(file.toPath().toString()).block();
File outFile = new File(generatePathName() + ".txt");
if (outFile.exists()) {
assertTrue(outFile.delete());
}
fileClient.downloadToFile(outFile.toPath().toString()).block();
assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
shareServiceAsyncClient.deleteShare(shareName).block();
outFile.delete();
file.delete();
}
// Upload the default payload, then download it and verify both headers and body content.
@Test
public void uploadAndDownloadData() {
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null)).assertNext(response -> {
        assertTrue((response.getStatusCode() == 200) || (response.getStatusCode() == 206));
        ShareFileDownloadHeaders headers = response.getDeserializedHeaders();
        assertEquals(DATA.getDefaultDataSizeLong(), headers.getContentLength());
        assertNotNull(headers.getETag());
        assertNotNull(headers.getLastModified());
        assertNotNull(headers.getFilePermissionKey());
        assertNotNull(headers.getFileAttributes());
        assertNotNull(headers.getFileLastWriteTime());
        assertNotNull(headers.getFileCreationTime());
        assertNotNull(headers.getFileChangeTime());
        assertNotNull(headers.getFileParentId());
        assertNotNull(headers.getFileId());
        // Fixed: the original built a collectBytes...flatMap pipeline but never subscribed to it,
        // so the body comparison never ran. Block here (test code) so the assertion executes.
        byte[] actualData = FluxUtil.collectBytesInByteBufferStream(response.getValue()).block();
        assertArrayEquals(DATA.getDefaultBytes(), actualData);
    }).verifyComplete();
}
// Upload at offset 1, then download that exact range and verify status, length and content.
@Test
public void uploadAndDownloadDataWithArgs() {
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()).setOffset(1L)))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1,
        DATA.getDefaultDataSizeLong()), true)).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 206);
        assertEquals(DATA.getDefaultDataSizeLong(), it.getDeserializedHeaders().getContentLength());
        // Fixed: the original built a collectBytes...flatMap pipeline but never subscribed to it,
        // so the body comparison never ran. Block here (test code) so the assertion executes.
        byte[] actualData = FluxUtil.collectBytesInByteBufferStream(it.getValue()).block();
        assertArrayEquals(DATA.getDefaultBytes(), actualData);
    }).verifyComplete();
}
@Test
public void uploadDataError() {
    // Uploading to a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
    StepVerifier
        .create(primaryFileAsyncClient.upload(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong()))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(
            error, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void uploadLease() {
    // Uploading with the currently-held lease id must succeed.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: expectNextCount only builds the verification script; without verifyComplete()
    // the StepVerifier never subscribed and this test asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
        DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
        .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId))))
        .expectNextCount(1).verifyComplete();
}
@Test
public void uploadLeaseFail() {
    // Uploading with a lease id that does not match the active lease must be rejected.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions mismatchedLease =
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier
        .create(primaryFileAsyncClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), DATA.getDefaultDataSizeLong())
                .setRequestConditions(mismatchedLease)))
        .verifyError(ShareStorageException.class);
}
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(long size, String errMsg) {
    // A declared length that disagrees with the actual flux size must raise UnexpectedLengthException.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier
        .create(primaryFileAsyncClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(DATA.getDefaultFlux(), size).setOffset(0L)))
        .verifyErrorSatisfies(error -> {
            assertInstanceOf(UnexpectedLengthException.class, error);
            assertTrue(error.getMessage().contains(errMsg));
        });
}
// Pairs of (declared size, expected error-message fragment) for uploadDataLengthMismatch.
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
    return Stream.of(Arguments.of(6, "more than"), Arguments.of(8, "less than"));
}
@Test
public void downloadDataError() {
    // Downloading from a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
    StepVerifier
        .create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 1023L), false))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(
            error, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void downloadLease() {
    // Downloading with the currently-held lease id must succeed.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: expectNextCount only builds the verification script; without verifyComplete()
    // the StepVerifier never subscribed and this test asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, new ShareRequestConditions()
        .setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void downloadLeaseFail() {
    // Downloading with a lease id that does not match the active lease must be rejected.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions mismatchedLease =
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(null, null, mismatchedLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void uploadAndClearRange() {
    // Upload known text, clear the first 7 bytes, and verify the cleared region reads back as zeros.
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.upload(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 0)).assertNext(it ->
        FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(0, 6L), false))
        .assertNext(it -> {
            // BUG FIX: the previous flatMap chain was never subscribed, so the per-byte
            // assertions never ran. Block for the bytes so the checks actually execute.
            byte[] data = FluxUtil.collectBytesInByteBufferStream(it.getValue()).block();
            assertNotNull(data);
            for (byte b : data) {
                assertEquals(0, b); // expected value first (JUnit convention)
            }
        }).verifyComplete();
}
@Test
public void uploadAndClearRangeWithArgs() {
    // Upload known text, clear 7 bytes starting at offset 1, and verify the region reads back as zeros.
    String fullInfoString = "please clear the range";
    ByteBuffer fullInfoData = ByteBuffer.wrap(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(fullInfoString.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(fullInfoData), fullInfoString.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 1)).assertNext(it ->
        FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
    StepVerifier.create(primaryFileAsyncClient.downloadWithResponse(new ShareFileRange(1, 7L), false))
        .assertNext(it -> {
            // BUG FIX: the previous flatMap chain was never subscribed, so the per-byte
            // assertions never ran. Block for the bytes so the checks actually execute.
            byte[] data = FluxUtil.collectBytesInByteBufferStream(it.getValue()).block();
            assertNotNull(data);
            for (byte b : data) {
                assertEquals(0, b); // expected value first (JUnit convention)
            }
        }).verifyComplete();
    fullInfoData.clear();
}
@Test
public void clearRangeError() {
    // Clearing past the end of the file must fail with 416 INVALID_RANGE.
    String contents = "please clear the range";
    ByteBuffer payload = ByteBuffer.wrap(contents.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(contents.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(payload), contents.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRange(30))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(
            error, 416, ShareErrorCode.INVALID_RANGE));
}
@Test
public void clearRangeErrorArgs() {
    // Clearing a range whose offset pushes it past the end of the file must fail with 416 INVALID_RANGE.
    String contents = "please clear the range";
    ByteBuffer payload = ByteBuffer.wrap(contents.getBytes(StandardCharsets.UTF_8));
    primaryFileAsyncClient.create(contents.length()).block();
    primaryFileAsyncClient.upload(Flux.just(payload), contents.length()).block();
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(7, 20))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(
            error, 416, ShareErrorCode.INVALID_RANGE));
    payload.clear();
}
@Test
public void clearRangeLease() {
    // Clearing a range with the currently-held lease id must succeed.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    // BUG FIX: expectNextCount only builds the verification script; without verifyComplete()
    // the StepVerifier never subscribed and this test asserted nothing.
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, new ShareRequestConditions()
        .setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void clearRangeLeaseFail() {
    // Clearing a range with a lease id that does not match the active lease must be rejected.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions mismatchedLease =
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.clearRangeWithResponse(1, 0, mismatchedLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void uploadFileDoesNotExist() {
    // uploadFromFile with a missing local path must surface NoSuchFileException as the cause.
    File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
    if (uploadFile.exists()) {
        assertTrue(uploadFile.delete());
    }
    StepVerifier.create(primaryFileAsyncClient.uploadFromFile(uploadFile.getPath()))
        .verifyErrorSatisfies(error -> assertInstanceOf(NoSuchFileException.class, error.getCause()));
    uploadFile.delete();
}
@Test
public void uploadAndDownloadFileExists() throws IOException {
    // downloadToFile must refuse to overwrite a file that already exists locally.
    String data = "Download file exists";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (!downloadFile.exists()) {
        assertTrue(downloadFile.createNewFile());
    }
    primaryFileAsyncClient.create(data.length()).block();
    primaryFileAsyncClient
        .uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))), data.length())
        .block();
    StepVerifier.create(primaryFileAsyncClient.downloadToFile(downloadFile.getPath()))
        .verifyErrorSatisfies(error ->
            assertInstanceOf(FileAlreadyExistsException.class, error.getCause()));
    downloadFile.delete();
}
@Test
public void uploadAndDownloadToFileDoesNotExist() throws FileNotFoundException {
    // downloadToFile must create the destination and write exactly the uploaded content.
    String data = "Download file does not exist";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (downloadFile.exists()) {
        assertTrue(downloadFile.delete());
    }
    primaryFileAsyncClient.create(data.length()).block();
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes(StandardCharsets.UTF_8))),
        data.length()).block();
    StepVerifier.create(primaryFileAsyncClient.downloadToFile(downloadFile.getPath())).assertNext(it ->
        assertEquals(it.getContentLength(), data.length())).verifyComplete();
    // FIX: try-with-resources guarantees the Scanner is closed even if the assertion fails
    // (the previous explicit close() was skipped on assertion failure).
    try (Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z")) {
        assertEquals(data, scanner.next());
    }
    downloadFile.delete();
}
@Test
public void uploadFromFileLease() throws IOException {
    // uploadFromFile with the currently-held lease id must complete successfully.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    String fileName = generatePathName();
    String localPath = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    StepVerifier
        .create(primaryFileAsyncClient.uploadFromFile(localPath,
            new ShareRequestConditions().setLeaseId(leaseId)))
        .verifyComplete();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void uploadFromFileLeaseFail() throws IOException {
    // uploadFromFile with a lease id that does not match the active lease must be rejected.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    String fileName = generatePathName();
    String localPath = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    ShareRequestConditions mismatchedLease =
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.uploadFromFile(localPath, mismatchedLease))
        .verifyError(ShareStorageException.class);
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void downloadToFileLease() {
    // downloadToFile with the currently-held lease id must succeed.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
        DATA.getDefaultDataSizeLong())).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    ShareRequestConditions heldLease = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier
        .create(primaryFileAsyncClient.downloadToFileWithResponse(
            downloadFile.toPath().toString(), null, heldLease))
        .expectNextCount(1)
        .verifyComplete();
    downloadFile.delete();
}
@Test
public void downloadToFileLeaseFail() {
    // downloadToFile with a lease id that does not match the active lease must be rejected.
    primaryFileAsyncClient.create(DATA.getDefaultDataSizeLong()).block();
    primaryFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultFlux(),
        DATA.getDefaultDataSizeLong())).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    ShareRequestConditions mismatchedLease =
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier
        .create(primaryFileAsyncClient.downloadToFileWithResponse(
            downloadFile.toPath().toString(), null, mismatchedLease))
        .verifyError(ShareStorageException.class);
    downloadFile.delete();
}
@Disabled("Groovy version of this test was not asserting contents of result properly. Need to revisit this test.")
@Test
public void uploadRangeFromURL() {
    // Copy a 5-byte window from the source file (via SAS URL) into offset 0 of a destination
    // file, then verify the copied characters match the source window.
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.upload(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    // Read-only SAS so the destination client can pull from the source URL.
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient destinationClient = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl())
        .buildFileAsyncClient();
    destinationClient.create(1024).block();
    destinationClient.uploadRangeFromUrl(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken).block();
    StepVerifier.create(FluxUtil.collectBytesInByteBufferStream(destinationClient.download()))
        .assertNext(downloaded -> {
            String result = new String(downloaded);
            for (int i = 0; i < length; i++) {
                assertEquals(result.charAt((int) (destinationOffset + i)), data.charAt((int) (sourceOffset + i)));
            }
        }).verifyComplete();
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
@Test
public void uploadRangeFromURLLease() {
primaryFileAsyncClient.create(1024).block();
String data = "The quick brown fox jumps over the lazy dog";
long sourceOffset = 5;
int length = 5;
long destinationOffset = 0;
primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
ENVIRONMENT.getPrimaryAccount().getConnectionString());
String sasToken = new ShareServiceSasSignatureValues()
.setExpiryTime(testResourceNamer.now().plusDays(1))
.setPermissions(new ShareFileSasPermission().setReadPermission(true))
.setShareName(primaryFileAsyncClient.getShareName())
.setFilePath(primaryFileAsyncClient.getFilePath())
.generateSasQueryParameters(credential)
.encode();
ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
.endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
client.create(1024).block();
String leaseId = createLeaseClient(client).acquireLease().block();
StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
primaryFileAsyncClient.getFileUrl() + "?" + sasToken, new ShareRequestConditions().setLeaseId(leaseId)))
.expectNextCount(1).verifyComplete();
}
@Test
public void uploadRangeFromURLLeaseFail() {
    // uploadRangeFromUrl against a leased destination must be rejected when the
    // supplied lease id does not match the active lease.
    primaryFileAsyncClient.create(1024).block();
    String data = "The quick brown fox jumps over the lazy dog";
    long sourceOffset = 5;
    int length = 5;
    long destinationOffset = 0;
    primaryFileAsyncClient.uploadRange(Flux.just(ByteBuffer.wrap(data.getBytes())), data.length()).block();
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    // Read-only SAS so the destination client can pull from the source URL.
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileAsyncClient.getShareName())
        .setFilePath(primaryFileAsyncClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    ShareFileAsyncClient client = fileBuilderHelper(shareName, "destination")
        .endpoint(primaryFileAsyncClient.getFileUrl()).buildFileAsyncClient();
    client.create(1024).block();
    createLeaseClient(client).acquireLease().block();
    // FIX: dropped the redundant .toString() on getFileUrl(), which already returns a String.
    StepVerifier.create(client.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
        primaryFileAsyncClient.getFileUrl() + "?" + sasToken,
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
@Test
public void startCopy() {
    // Start a copy of the file onto itself and verify a copy id is reported.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL,
        new ShareFileCopyOptions(), getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(pollResponse -> assertNotNull(pollResponse.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, smbProperties,
setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null,
getPollingDuration(1000), null);
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
    + "between the time subscribed and the time we start observing events.")
@Test
public void startCopyError() {
    // Disabled: currently mirrors the happy-path copy flow until the poller race is resolved.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL,
        new ShareFileCopyOptions(), getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(pollResponse -> assertNotNull(pollResponse.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
    + "between the time subscribed and the time we start observing events.")
@Test
public void startCopyLease() {
    // beginCopy with the currently-held destination lease id must report a copy id.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL, null, null,
        null, false, false, null, getPollingDuration(1000),
        new ShareRequestConditions().setLeaseId(leaseId));
    StepVerifier.create(copyPoller)
        .assertNext(pollResponse -> assertNotNull(pollResponse.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap "
    + "between the time subscribed and the time we start observing events.")
@Test
public void startCopyLeaseFail() {
    // NOTE(review): despite the "Fail" name this test asserts a successful copy with a
    // mismatched lease id; the expectation looks inverted — revisit when re-enabling.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL, null, null,
        null, false, false, null, getPollingDuration(1000),
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid()));
    StepVerifier.create(copyPoller)
        .assertNext(pollResponse -> assertNotNull(pollResponse.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
if (setFilePermissionKey) {
smbProperties.setFilePermissionKey(filePermissionKey);
}
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(setFilePermission ? FILE_PERMISSION : null)
.setIgnoreReadOnly(ignoreReadOnly)
.setArchiveAttribute(setArchiveAttribute)
.setPermissionCopyModeType(permissionType);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
    // beginCopy with ignoreReadOnly and archiveAttribute enabled must report a copy id.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setIgnoreReadOnly(true)
        .setArchiveAttribute(true);
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL, copyOptions,
        getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(pollResponse -> assertNotNull(pollResponse.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsFilePermission() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it -> assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsChangeTime() {
ShareFileInfo client = primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
smbProperties.setFileChangeTime(testResourceNamer.now());
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
Objects.requireNonNull(primaryFileAsyncClient.getProperties().block()).getSmbProperties()
.getFileChangeTime());
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs)
.setFilePermissionKey(filePermissionKey);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete()
.verify(Duration.ofMinutes(1));
FileSmbProperties properties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
smbProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
smbProperties.getFileLastWriteTime());
assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}
@Test
public void startCopyWithOptionsLease() {
    // beginCopy with destination request conditions carrying the held lease id must succeed.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setDestinationRequestConditions(new ShareRequestConditions().setLeaseId(leaseId));
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL, copyOptions,
        getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(pollResponse -> assertNotNull(pollResponse.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@Test
public void startCopyWithOptionsInvalidLease() {
    // beginCopy with a random (non-held) lease id on the destination must be rejected.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setDestinationRequestConditions(new ShareRequestConditions()
            .setLeaseId(testResourceNamer.randomUuid()));
    assertThrows(ShareStorageException.class,
        () -> primaryFileAsyncClient.beginCopy(sourceURL, copyOptions, getPollingDuration(1000)).blockFirst());
}
@Test
public void startCopyWithOptionsMetadata() {
    // beginCopy carrying destination metadata must still report a copy id.
    primaryFileAsyncClient.create(1024).block();
    String sourceURL = primaryFileAsyncClient.getFileUrl();
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions().setMetadata(testMetadata);
    PollerFlux<ShareFileCopyInfo, Void> copyPoller = primaryFileAsyncClient.beginCopy(sourceURL, copyOptions,
        getPollingDuration(1000));
    StepVerifier.create(copyPoller)
        .assertNext(pollResponse -> assertNotNull(pollResponse.getValue().getCopyId()))
        .expectComplete()
        .verify(Duration.ofMinutes(1));
}
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
primaryFileAsyncClient.create(1024).block();
ShareFileProperties initialProperties = primaryFileAsyncClient.getProperties().block();
assertNotNull(initialProperties);
OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
String sourceURL = primaryFileAsyncClient.getFileUrl();
String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(true)
.setLastWrittenOn(true)
.setChangedOn(true)
.setFileAttributes(true);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setDestinationRequestConditions(conditions)
.setSmbPropertiesToCopy(list);
PollerFlux<ShareFileCopyInfo, Void> poller = primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000));
StepVerifier.create(poller).assertNext(it ->
assertNotNull(it.getValue().getCopyId())).expectComplete().verify(Duration.ofMinutes(1));
FileSmbProperties resultProperties = Objects.requireNonNull(primaryFileAsyncClient.getProperties().block())
.getSmbProperties();
FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getFileCreationTime());
FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getFileLastWriteTime());
FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getFileChangeTime());
assertEquals(fileAttributes, resultProperties.getNtfsFileAttributes());
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
boolean fileAttributes) {
primaryFileAsyncClient.create(1024).block();
String sourceURL = primaryFileAsyncClient.getFileUrl();
EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
.setCreatedOn(createdOn)
.setLastWrittenOn(lastWrittenOn)
.setChangedOn(changedOn)
.setFileAttributes(fileAttributes);
smbProperties
.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFileChangeTime(testResourceNamer.now())
.setNtfsFileAttributes(ntfs);
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(FILE_PERMISSION)
.setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
.setSmbPropertiesToCopy(list);
assertThrows(IllegalArgumentException.class, () -> primaryFileAsyncClient.beginCopy(sourceURL, options,
getPollingDuration(1000)));
}
@Disabled("TODO: Need to find a way of mocking pending copy status")
@Test
public void abortCopy() {
    // Intentionally empty: aborting requires a copy that is still pending, which cannot
    // currently be reproduced deterministically against the live service (see @Disabled).
}
@Test
public void deleteFile() {
    // Deleting an existing file must return 202 Accepted.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse())
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileError() {
    // Deleting a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
    StepVerifier.create(primaryFileAsyncClient.delete())
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(
            error, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void deleteFileLease() {
    // Deleting with the currently-held lease id must return 202 Accepted.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions heldLease = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(heldLease))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileLeaseFail() {
    // Deleting with a lease id that does not match the active lease must be rejected.
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions mismatchedLease =
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.deleteWithResponse(mismatchedLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void deleteIfExistsFile() {
    // deleteIfExists on an existing file must return 202 Accepted.
    primaryFileAsyncClient.create(1024).block();
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(null))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteFileThatDoesNotExist() {
    // deleteIfExists on a nonexistent file must report 404 with a false value, not throw.
    ShareFileAsyncClient missingFileClient = primaryFileAsyncClient.getFileAsyncClient(generateShareName());
    Response<Boolean> response = missingFileClient.deleteIfExistsWithResponse(null, null).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    assertEquals(response.getStatusCode(), 404);
    assertNotEquals(Boolean.TRUE, missingFileClient.exists().block());
}
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
    // First deleteIfExists returns true; a second one on the now-missing file returns false.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null, null, null).block();
    assertEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
    assertNotEquals(Boolean.TRUE, primaryFileAsyncClient.deleteIfExists().block());
}
@Test
public void deleteIfExistsFileLease() {
    // deleteIfExists with the currently-held lease id must return 202 Accepted.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions heldLease = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(heldLease))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 202))
        .verifyComplete();
}
@Test
public void deleteIfExistsFileLeaseFail() {
    // deleteIfExists with a lease id that does not match the active lease must be rejected.
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions mismatchedLease =
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.deleteIfExistsWithResponse(mismatchedLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void getProperties() {
    primaryFileAsyncClient.create(1024).block();
    // All SMB-related properties should be populated by the service on a plain create.
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 200);
        assertNotNull(it.getValue().getETag());
        // A duplicated getLastModified() assertion was removed here.
        assertNotNull(it.getValue().getLastModified());
        assertNotNull(it.getValue().getSmbProperties());
        assertNotNull(it.getValue().getSmbProperties().getFilePermissionKey());
        assertNotNull(it.getValue().getSmbProperties().getNtfsFileAttributes());
        assertNotNull(it.getValue().getSmbProperties().getFileLastWriteTime());
        assertNotNull(it.getValue().getSmbProperties().getFileCreationTime());
        assertNotNull(it.getValue().getSmbProperties().getFileChangeTime());
        assertNotNull(it.getValue().getSmbProperties().getParentId());
        assertNotNull(it.getValue().getSmbProperties().getFileId());
    }).verifyComplete();
}
@Test
public void getPropertiesLease() {
    // Reading properties with the matching lease id succeeds.
    primaryFileAsyncClient.create(1024).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(conditions))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void getPropertiesLeaseFail() {
    // Reading properties with a non-matching lease id must fail.
    primaryFileAsyncClient.create(1024).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.getPropertiesWithResponse(wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void getPropertiesError() {
    // Getting properties of a file that was never created surfaces a storage error.
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .verifyErrorSatisfies(error -> assertInstanceOf(ShareStorageException.class, error));
}
@Test
public void setHttpHeadersFpk() {
    // Sets properties referencing a share-level permission by its key via SMB properties.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 200);
            assertNotNull(response.getValue().getSmbProperties());
            assertNotNull(response.getValue().getSmbProperties().getFilePermissionKey());
            assertNotNull(response.getValue().getSmbProperties().getNtfsFileAttributes());
            assertNotNull(response.getValue().getSmbProperties().getFileLastWriteTime());
            assertNotNull(response.getValue().getSmbProperties().getFileCreationTime());
            assertNotNull(response.getValue().getSmbProperties().getFileChangeTime());
            assertNotNull(response.getValue().getSmbProperties().getParentId());
            assertNotNull(response.getValue().getSmbProperties().getFileId());
        }).verifyComplete();
}
@Test
public void setHttpHeadersFp() {
    // Sets properties passing the raw file permission directly instead of a permission key.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
            FILE_PERMISSION))
        .assertNext(response -> {
            FileShareTestHelper.assertResponseStatusCode(response, 200);
            assertNotNull(response.getValue().getSmbProperties());
            assertNotNull(response.getValue().getSmbProperties().getFilePermissionKey());
            assertNotNull(response.getValue().getSmbProperties().getNtfsFileAttributes());
            assertNotNull(response.getValue().getSmbProperties().getFileLastWriteTime());
            assertNotNull(response.getValue().getSmbProperties().getFileCreationTime());
            assertNotNull(response.getValue().getSmbProperties().getFileChangeTime());
            assertNotNull(response.getValue().getSmbProperties().getParentId());
            assertNotNull(response.getValue().getSmbProperties().getFileId());
        }).verifyComplete();
}
@Test
public void setHttpHeadersLease() {
    // Setting properties with the matching lease id succeeds.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null, conditions))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void setHttpHeadersLeaseFail() {
    // Setting properties with a non-matching lease id must fail.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.setPropertiesWithResponse(512, null, null, null, wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void setHttpHeadersError() {
    // A negative size is rejected by the service with OUT_OF_RANGE_INPUT (HTTP 400).
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    StepVerifier.create(primaryFileAsyncClient.setProperties(-1, null, null, null))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.OUT_OF_RANGE_INPUT));
}
@Test
public void setMetadata() {
    // Create with initial metadata, then replace it and confirm the update took effect.
    primaryFileAsyncClient.createWithResponse(1024, httpHeaders, null, null, testMetadata).block();
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .assertNext(properties -> assertEquals(testMetadata, properties.getMetadata()))
        .verifyComplete();
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(updatedMetadata))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 200))
        .verifyComplete();
    StepVerifier.create(primaryFileAsyncClient.getProperties())
        .assertNext(properties -> assertEquals(updatedMetadata, properties.getMetadata()))
        .verifyComplete();
}
@Test
public void setMetadataError() {
    // An empty metadata key is invalid and rejected with EMPTY_METADATA_KEY (HTTP 400).
    primaryFileAsyncClient.create(1024).block();
    Map<String, String> errorMetadata = Collections.singletonMap("", "value");
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(errorMetadata))
        .verifyErrorSatisfies(error -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 400,
            ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void setMetadataLease() {
    // Setting metadata with the matching lease id succeeds.
    primaryFileAsyncClient.create(1024).block();
    Map<String, String> metadata = Collections.singletonMap("key", "value");
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata, conditions))
        .expectNextCount(1)
        .verifyComplete();
}
@Test
public void setMetadataLeaseFail() {
    // Setting metadata with a non-matching lease id must fail.
    primaryFileAsyncClient.create(1024).block();
    Map<String, String> metadata = Collections.singletonMap("key", "value");
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.setMetadataWithResponse(metadata, wrongLease))
        .verifyError(ShareStorageException.class);
}
@Test
public void listRanges() throws IOException {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    // The whole 1024-byte file was written, so a single range [0, 1023] is expected.
    StepVerifier.create(primaryFileAsyncClient.listRanges())
        .assertNext(range -> {
            assertEquals(0, range.getStart());
            assertEquals(1023, range.getEnd());
        }).verifyComplete();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesWithRange() throws IOException {
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    // Restricting the query to [0, 511] should clip the reported range accordingly.
    StepVerifier.create(primaryFileAsyncClient.listRanges(new ShareFileRange(0, 511L)))
        .assertNext(range -> {
            assertEquals(0, range.getStart());
            assertEquals(511, range.getEnd());
        }).verifyComplete();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesLease() throws IOException {
    // Listing ranges with the matching lease id succeeds.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    String leaseId = createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    StepVerifier.create(primaryFileAsyncClient.listRanges(null, conditions))
        .expectNextCount(1)
        .verifyComplete();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
@Test
public void listRangesLeaseFail() throws IOException {
    // Listing ranges with a non-matching lease id must fail.
    primaryFileAsyncClient.createWithResponse(1024, null, null, null, null).block();
    String fileName = generatePathName();
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileAsyncClient.uploadFromFile(uploadFile).block();
    createLeaseClient(primaryFileAsyncClient).acquireLease().block();
    ShareRequestConditions wrongLease = new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid());
    StepVerifier.create(primaryFileAsyncClient.listRanges(null, wrongLease))
        .verifyError(ShareStorageException.class);
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
// NOTE(review): the two annotation string literals below appear truncated (likely an
// extraction artifact); confirm their full values against the original source file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
    List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
    // Take a snapshot after a full 4 MB upload, then mutate the live file and verify the
    // range diff between live file and snapshot matches the expected updated/cleared ranges.
    String snapshotId = primaryFileAsyncClient.create(4 * Constants.MB)
        .then(primaryFileAsyncClient.upload(Flux.just(FileShareTestHelper.getRandomByteBuffer(4 * Constants.MB)),
            4 * Constants.MB))
        .then(primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName())
            .createSnapshot()
            .map(ShareSnapshotInfo::getSnapshot))
        .block();
    Flux.fromIterable(rangesToUpdate)
        .flatMap(it -> {
            int size = (int) (it.getEnd() - it.getStart() + 1);
            return primaryFileAsyncClient.uploadWithResponse(Flux.just(
                FileShareTestHelper.getRandomByteBuffer(size)), size, it.getStart());
        }).blockLast();
    Flux.fromIterable(rangesToClear)
        .flatMap(it -> primaryFileAsyncClient.clearRangeWithResponse(it.getEnd() - it.getStart() + 1,
            it.getStart()))
        .blockLast();
    StepVerifier.create(primaryFileAsyncClient.listRangesDiff(snapshotId)).assertNext(it -> {
        // assertEquals takes the expected value first; the size assertions previously had
        // the arguments swapped, which yields misleading failure messages.
        assertEquals(expectedRanges.size(), it.getRanges().size());
        assertEquals(expectedClearRanges.size(), it.getClearRanges().size());
        for (int i = 0; i < expectedRanges.size(); i++) {
            FileRange actualRange = it.getRanges().get(i);
            FileRange expectedRange = expectedRanges.get(i);
            assertEquals(expectedRange.getStart(), actualRange.getStart());
            assertEquals(expectedRange.getEnd(), actualRange.getEnd());
        }
        for (int i = 0; i < expectedClearRanges.size(); i++) {
            ClearRange actualRange = it.getClearRanges().get(i);
            ClearRange expectedRange = expectedClearRanges.get(i);
            assertEquals(expectedRange.getStart(), actualRange.getStart());
            assertEquals(expectedRange.getEnd(), actualRange.getEnd());
        }
    }).verifyComplete();
}
@Test
public void listHandles() {
    // A freshly created file has no open handles, so the listing completes without items.
    StepVerifier.create(primaryFileAsyncClient.create(1024)
            .thenMany(primaryFileAsyncClient.listHandles()))
        .verifyComplete();
}
@Test
public void listHandlesWithMaxResult() {
    // Paging with a max-results hint still completes empty when no handles are open.
    StepVerifier.create(primaryFileAsyncClient.create(1024)
            .thenMany(primaryFileAsyncClient.listHandles(2)))
        .verifyComplete();
}
// NOTE(review): the annotation string literal below appears truncated (likely an
// extraction artifact); confirm its full value against the original source file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseHandleMin() {
    primaryFileAsyncClient.create(512).block();
    // Force-closing a handle that is not open reports zero closed and zero failed handles.
    StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("1")).assertNext(it -> {
        // assertEquals takes the expected value first; the arguments were previously swapped.
        assertEquals(0, it.getClosedHandles());
        assertEquals(0, it.getFailedHandles());
    }).verifyComplete();
}
@Test
public void forceCloseHandleInvalidHandleID() {
    // A malformed handle id is rejected by the service with a storage error.
    primaryFileAsyncClient.create(512).block();
    StepVerifier.create(primaryFileAsyncClient.forceCloseHandle("invalidHandleId"))
        .verifyErrorSatisfies(error -> assertInstanceOf(ShareStorageException.class, error));
}
// NOTE(review): the annotation string literal below appears truncated (likely an
// extraction artifact); confirm its full value against the original source file.
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void forceCloseAllHandlesMin() {
    primaryFileAsyncClient.create(512).block();
    // With no handles open, force-closing all handles reports zero closed and zero failed.
    StepVerifier.create(primaryFileAsyncClient.forceCloseAllHandles())
        .assertNext(it -> {
            // assertEquals takes the expected value first; the arguments were previously swapped.
            assertEquals(0, it.getClosedHandles());
            assertEquals(0, it.getFailedHandles());
        }).verifyComplete();
}
@Test
public void getSnapshotId() {
    // Build a client pinned to a specific share snapshot and verify the id round-trips.
    OffsetDateTime snapshotTime = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC);
    String snapshot = snapshotTime.toString();
    ShareFileAsyncClient shareSnapshotClient = fileBuilderHelper(shareName, filePath)
        .snapshot(snapshot)
        .buildFileAsyncClient();
    assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId());
}
@Test
public void getShareName() {
    // The client must report the share name it was constructed with.
    String actualShareName = primaryFileAsyncClient.getShareName();
    assertEquals(shareName, actualShareName);
}
@Test
public void getFilePath() {
    // The client must report the file path it was constructed with.
    String actualFilePath = primaryFileAsyncClient.getFilePath();
    assertEquals(filePath, actualFilePath);
}
// NOTE(review): the two annotation string literals below appear truncated (likely an
// extraction artifact); confirm their full values against the original source file.
@EnabledIf("com.azure.storage.file.share.FileShareTestBase
@DisabledIf("com.azure.storage.file.share.FileShareTestBase
@Test
public void listHandlesClientName() {
    ShareAsyncClient client = primaryFileServiceAsyncClient.getShareAsyncClient("testing");
    ShareDirectoryAsyncClient directoryClient = client.getDirectoryClient("dir1");
    ShareFileAsyncClient fileClient = directoryClient.getFileClient("test.txt");
    List<HandleItem> list = fileClient.listHandles().collectList().block();
    // Guard against an NPE/IndexOutOfBounds on list.get(0): block() may return null and the
    // listing may be empty; fail with a clear assertion message instead.
    assertNotNull(list);
    assertFalse(list.isEmpty());
    assertNotNull(list.get(0).getClientName());
}
} |
This should use `assertInstanceOf` as that will produce a better exception message if this fails. | public void ioExceptionInErrorDeserializationReturnsException() {
JacksonAdapter ioExceptionThrower = new JacksonAdapter() {
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
throw new IOException();
}
};
HttpResponseDecodeData noExpectedStatusCodes = new MockHttpResponseDecodeData(
new UnexpectedExceptionInformation(HttpResponseException.class));
HttpResponse response = new MockHttpResponse(GET_REQUEST, 300);
Object deserializedResponse =
HttpResponseBodyDecoder.decodeByteArray(null, response, ioExceptionThrower, noExpectedStatusCodes);
assertTrue(deserializedResponse instanceof IOException);
} | assertTrue(deserializedResponse instanceof IOException); | public void ioExceptionInErrorDeserializationReturnsException() {
JacksonAdapter ioExceptionThrower = new JacksonAdapter() {
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
throw new IOException();
}
};
HttpResponseDecodeData noExpectedStatusCodes = new MockHttpResponseDecodeData(
new UnexpectedExceptionInformation(HttpResponseException.class));
HttpResponse response = new MockHttpResponse(GET_REQUEST, 300);
assertInstanceOf(IOException.class,
HttpResponseBodyDecoder.decodeByteArray(null, response, ioExceptionThrower, noExpectedStatusCodes));
} | class HttpResponseBodyDecoderTests {
private static final JacksonAdapter ADAPTER = new JacksonAdapter();
private static final HttpRequest GET_REQUEST = new HttpRequest(HttpMethod.GET, "https:
private static final HttpRequest HEAD_REQUEST = new HttpRequest(HttpMethod.HEAD, "https:
@ParameterizedTest
@MethodSource("invalidHttpResponseSupplier")
public void invalidHttpResponse(HttpResponse response) {
assertThrows(NullPointerException.class,
() -> HttpResponseBodyDecoder.decodeByteArray(null, response, null, null));
}
private static Stream<Arguments> invalidHttpResponseSupplier() {
return Stream.of(
Arguments.of((HttpResponse) null),
Arguments.of(new MockHttpResponse(null, 200)),
Arguments.of(new MockHttpResponse(new HttpRequest(null, "https:
);
}
@ParameterizedTest
@MethodSource("errorResponseSupplier")
public void errorResponse(HttpResponse httpResponse, HttpResponseDecodeData decodeData,
boolean isEmpty, Object expected) {
StepVerifier.FirstStep<Object> firstStep = StepVerifier.create(httpResponse.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, httpResponse, ADAPTER, decodeData)));
if (isEmpty) {
firstStep.verifyComplete();
} else {
firstStep.assertNext(actual -> assertEquals(expected, actual)).verifyComplete();
}
}
private static Stream<Arguments> errorResponseSupplier() {
UnexpectedExceptionInformation exceptionInformation = new MockUnexpectedExceptionInformation(
HttpResponseException.class, String.class);
HttpResponseDecodeData noExpectedStatusCodes = new MockHttpResponseDecodeData(exceptionInformation);
HttpResponseDecodeData expectedStatusCodes = new MockHttpResponseDecodeData(202, exceptionInformation);
HttpResponse emptyResponse = new MockHttpResponse(GET_REQUEST, 300, (Object) null);
HttpResponse response = new MockHttpResponse(GET_REQUEST, 300, "expected");
HttpResponse wrongGoodResponse = new MockHttpResponse(GET_REQUEST, 200, "good response");
return Stream.of(
Arguments.of(emptyResponse, noExpectedStatusCodes, true, null),
Arguments.of(emptyResponse, expectedStatusCodes, true, null),
Arguments.of(response, noExpectedStatusCodes, false, "expected"),
Arguments.of(response, expectedStatusCodes, false, "expected"),
Arguments.of(wrongGoodResponse, expectedStatusCodes, false, "good response"),
Arguments.of(emptyResponse, noExpectedStatusCodes, true, null)
);
}
@Test
@Test
public void headRequestReturnsEmpty() {
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200);
HttpResponse response = new MockHttpResponse(HEAD_REQUEST, 200);
assertNull(HttpResponseBodyDecoder.decodeByteArray(null, response, ADAPTER, decodeData));
}
@ParameterizedTest
@MethodSource("nonDecodableResponseSupplier")
public void nonDecodableResponse(HttpResponseDecodeData decodeData) {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200);
assertNull(HttpResponseBodyDecoder.decodeByteArray(null, response, ADAPTER, decodeData));
}
private static Stream<Arguments> nonDecodableResponseSupplier() {
HttpResponseDecodeData nullReturnType = new MockHttpResponseDecodeData(200, null, false);
ParameterizedType fluxByteBuffer = mockParameterizedType(Flux.class, ByteBuffer.class);
HttpResponseDecodeData fluxByteBufferReturnType = new MockHttpResponseDecodeData(200, fluxByteBuffer, false);
ParameterizedType monoByteArray = mockParameterizedType(Mono.class, byte[].class);
HttpResponseDecodeData monoByteArrayReturnType = new MockHttpResponseDecodeData(200, monoByteArray, false);
ParameterizedType voidTypeResponse = mockParameterizedType(ResponseBase.class, int.class, Void.TYPE);
HttpResponseDecodeData voidTypeResponseReturnType = new MockHttpResponseDecodeData(200, voidTypeResponse, false);
ParameterizedType voidClassResponse = mockParameterizedType(ResponseBase.class, int.class, void.class);
HttpResponseDecodeData voidClassResponseReturnType = new MockHttpResponseDecodeData(200, voidClassResponse,
false);
return Stream.of(
Arguments.of(nullReturnType),
Arguments.of(fluxByteBufferReturnType),
Arguments.of(monoByteArrayReturnType),
Arguments.of(voidTypeResponseReturnType),
Arguments.of(voidClassResponseReturnType)
);
}
@Test
public void emptyResponseReturnsMonoEmpty() {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, (Object) null);
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, String.class, true);
assertNull(HttpResponseBodyDecoder.decodeByteArray(null, response, ADAPTER, decodeData));
}
@ParameterizedTest
@MethodSource("decodableResponseSupplier")
public void decodableResponse(HttpResponse response, HttpResponseDecodeData decodeData, Object expected) {
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(bytes -> HttpResponseBodyDecoder.decodeByteArray(bytes, response, ADAPTER, decodeData)))
.assertNext(actual -> assertEquals(expected, actual))
.verifyComplete();
}
private static Stream<Arguments> decodableResponseSupplier() {
HttpResponseDecodeData stringDecodeData = new MockHttpResponseDecodeData(200, String.class, String.class, true);
HttpResponse stringResponse = new MockHttpResponse(GET_REQUEST, 200, "hello");
HttpResponseDecodeData offsetDateTimeDecodeData = new MockHttpResponseDecodeData(200, OffsetDateTime.class,
OffsetDateTime.class, true);
OffsetDateTime offsetDateTimeNow = OffsetDateTime.now(ZoneOffset.UTC);
HttpResponse offsetDateTimeResponse = new MockHttpResponse(GET_REQUEST, 200, offsetDateTimeNow);
HttpResponseDecodeData dateTimeRfc1123DecodeData = new MockHttpResponseDecodeData(200, OffsetDateTime.class,
DateTimeRfc1123.class, true);
DateTimeRfc1123 dateTimeRfc1123Now = new DateTimeRfc1123(offsetDateTimeNow);
HttpResponse dateTimeRfc1123Response = new MockHttpResponse(GET_REQUEST, 200, dateTimeRfc1123Now);
HttpResponseDecodeData unixTimeDecodeData = new MockHttpResponseDecodeData(200, OffsetDateTime.class,
OffsetDateTime.class, true);
HttpResponse unixTimeResponse = new MockHttpResponse(GET_REQUEST, 200, offsetDateTimeNow);
ParameterizedType stringList = mockParameterizedType(List.class, String.class);
HttpResponseDecodeData stringListDecodeData = new MockHttpResponseDecodeData(200, stringList, String.class, true);
List<String> list = Arrays.asList("hello", "azure");
HttpResponse stringListResponse = new MockHttpResponse(GET_REQUEST, 200, list);
ParameterizedType mapStringString = mockParameterizedType(Map.class, String.class, String.class);
HttpResponseDecodeData mapStringStringDecodeData = new MockHttpResponseDecodeData(200, mapStringString,
String.class, true);
Map<String, String> map = Collections.singletonMap("hello", "azure");
HttpResponse mapStringStringResponse = new MockHttpResponse(GET_REQUEST, 200, map);
return Stream.of(
Arguments.of(stringResponse, stringDecodeData, "hello"),
Arguments.of(offsetDateTimeResponse, offsetDateTimeDecodeData, offsetDateTimeNow),
Arguments.of(dateTimeRfc1123Response, dateTimeRfc1123DecodeData,
new DateTimeRfc1123(dateTimeRfc1123Now.toString()).getDateTime()),
Arguments.of(unixTimeResponse, unixTimeDecodeData, offsetDateTimeNow),
Arguments.of(stringListResponse, stringListDecodeData, list),
Arguments.of(mapStringStringResponse, mapStringStringDecodeData, map)
);
}
@Test
public void decodeListBase64UrlResponse() {
ParameterizedType parameterizedType = mockParameterizedType(List.class, byte[].class);
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, parameterizedType, Base64Url.class, true);
List<Base64Url> base64Urls = Arrays.asList(new Base64Url("base"), new Base64Url("64"));
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, base64Urls);
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, response, ADAPTER, decodeData)))
.assertNext(actual -> {
assertTrue(actual instanceof List);
@SuppressWarnings("unchecked") List<byte[]> decoded = (List<byte[]>) actual;
assertEquals(2, decoded.size());
assertArraysEqual(base64Urls.get(0).decodedBytes(), decoded.get(0));
assertArraysEqual(base64Urls.get(1).decodedBytes(), decoded.get(1));
}).verifyComplete();
}
@SuppressWarnings("unchecked")
@Test
public void decodePageResponse() {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, new Page<String>() {
@Override
public IterableStream<String> getElements() {
return IterableStream.of(null);
}
@Override
public String getContinuationToken() {
return null;
}
});
HttpResponseDecodeData pageDecodeData = new MockHttpResponseDecodeData(200, String.class, Page.class, true);
HttpResponseDecodeData itemPageDecodeData = new MockHttpResponseDecodeData(200, String.class, ItemPage.class,
true);
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, response, ADAPTER, pageDecodeData)))
.assertNext(actual -> {
assertTrue(actual instanceof Page);
Page<String> page = (Page<String>) actual;
assertFalse(page.getElements().iterator().hasNext());
assertNull(page.getContinuationToken());
}).verifyComplete();
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, response, ADAPTER,
itemPageDecodeData)))
.assertNext(actual -> {
assertTrue(actual instanceof Page);
Page<String> page = (Page<String>) actual;
assertFalse(page.getElements().iterator().hasNext());
assertNull(page.getContinuationToken());
}).verifyComplete();
}
@Test
public void malformedBodyReturnsError() {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, (Object) null);
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, String.class, String.class, true);
assertThrows(HttpResponseException.class, () -> HttpResponseBodyDecoder.decodeByteArray(
"malformed JSON string".getBytes(StandardCharsets.UTF_8), response, ADAPTER, decodeData));
}
@Test
public void ioExceptionReturnsError() throws IOException {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, "valid JSON string");
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, String.class, String.class, true);
SerializerAdapter serializer = new MockSerializerAdapter() {
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
throw new IOException();
}
};
assertThrows(HttpResponseException.class, () ->
HttpResponseBodyDecoder.decodeByteArray(new byte[0], response, serializer, decodeData));
}
@ParameterizedTest
@MethodSource("decodeTypeSupplier")
public void decodeType(HttpResponse response, HttpResponseDecodeData data, Type expected) {
assertEquals(expected, HttpResponseBodyDecoder.decodedType(response, data));
}
private static Stream<Arguments> decodeTypeSupplier() {
HttpResponse badResponse = new MockHttpResponse(GET_REQUEST, 400);
HttpResponse headResponse = new MockHttpResponse(HEAD_REQUEST, 200);
HttpResponse getResponse = new MockHttpResponse(GET_REQUEST, 200);
HttpResponseDecodeData badResponseData = new MockHttpResponseDecodeData(-1,
new UnexpectedExceptionInformation(HttpResponseException.class));
HttpResponseDecodeData nonDecodable = new MockHttpResponseDecodeData(200, void.class, false);
HttpResponseDecodeData stringReturn = new MockHttpResponseDecodeData(200, String.class, true);
ParameterizedType monoString = mockParameterizedType(Mono.class, String.class);
HttpResponseDecodeData monoStringReturn = new MockHttpResponseDecodeData(200, monoString, true);
ParameterizedType responseString = mockParameterizedType(Response.class, String.class);
HttpResponseDecodeData responseStringReturn = new MockHttpResponseDecodeData(200, responseString, true);
HttpResponseDecodeData headDecodeData = new MockHttpResponseDecodeData(200, null, false);
return Stream.of(
Arguments.of(badResponse, badResponseData, Object.class),
Arguments.of(headResponse, headDecodeData, null),
Arguments.of(getResponse, nonDecodable, null),
Arguments.of(getResponse, stringReturn, String.class),
Arguments.of(getResponse, monoStringReturn, String.class),
Arguments.of(getResponse, responseStringReturn, String.class)
);
}
private static ParameterizedType mockParameterizedType(Type rawType, Type... actualTypeArguments) {
return new ParameterizedType() {
@Override
public Type[] getActualTypeArguments() {
return actualTypeArguments;
}
@Override
public Type getRawType() {
return rawType;
}
@Override
public Type getOwnerType() {
return null;
}
};
}
private static final class MockUnexpectedExceptionInformation extends UnexpectedExceptionInformation {
private final Class<?> exceptionBodyType;
/**
* Creates an UnexpectedExceptionInformation object with the given exception type and expected response body.
*
* @param exceptionType Exception type to be thrown.
*/
MockUnexpectedExceptionInformation(Class<? extends HttpResponseException> exceptionType,
Class<?> exceptionBodyType) {
super(exceptionType);
this.exceptionBodyType = exceptionBodyType;
}
@Override
public Class<? extends HttpResponseException> getExceptionType() {
return super.getExceptionType();
}
@Override
public Class<?> getExceptionBodyType() {
return exceptionBodyType;
}
}
} | class HttpResponseBodyDecoderTests {
private static final JacksonAdapter ADAPTER = new JacksonAdapter();
private static final HttpRequest GET_REQUEST = new HttpRequest(HttpMethod.GET, "https:
private static final HttpRequest HEAD_REQUEST = new HttpRequest(HttpMethod.HEAD, "https:
@ParameterizedTest
@MethodSource("invalidHttpResponseSupplier")
public void invalidHttpResponse(HttpResponse response) {
assertThrows(NullPointerException.class,
() -> HttpResponseBodyDecoder.decodeByteArray(null, response, null, null));
}
private static Stream<Arguments> invalidHttpResponseSupplier() {
return Stream.of(
Arguments.of((HttpResponse) null),
Arguments.of(new MockHttpResponse(null, 200)),
Arguments.of(new MockHttpResponse(new HttpRequest(null, "https:
);
}
@ParameterizedTest
@MethodSource("errorResponseSupplier")
public void errorResponse(HttpResponse httpResponse, HttpResponseDecodeData decodeData,
boolean isEmpty, Object expected) {
StepVerifier.FirstStep<Object> firstStep = StepVerifier.create(httpResponse.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, httpResponse, ADAPTER, decodeData)));
if (isEmpty) {
firstStep.verifyComplete();
} else {
firstStep.assertNext(actual -> assertEquals(expected, actual)).verifyComplete();
}
}
private static Stream<Arguments> errorResponseSupplier() {
UnexpectedExceptionInformation exceptionInformation = new MockUnexpectedExceptionInformation(
HttpResponseException.class, String.class);
HttpResponseDecodeData noExpectedStatusCodes = new MockHttpResponseDecodeData(exceptionInformation);
HttpResponseDecodeData expectedStatusCodes = new MockHttpResponseDecodeData(202, exceptionInformation);
HttpResponse emptyResponse = new MockHttpResponse(GET_REQUEST, 300, (Object) null);
HttpResponse response = new MockHttpResponse(GET_REQUEST, 300, "expected");
HttpResponse wrongGoodResponse = new MockHttpResponse(GET_REQUEST, 200, "good response");
return Stream.of(
Arguments.of(emptyResponse, noExpectedStatusCodes, true, null),
Arguments.of(emptyResponse, expectedStatusCodes, true, null),
Arguments.of(response, noExpectedStatusCodes, false, "expected"),
Arguments.of(response, expectedStatusCodes, false, "expected"),
Arguments.of(wrongGoodResponse, expectedStatusCodes, false, "good response"),
Arguments.of(emptyResponse, noExpectedStatusCodes, true, null)
);
}
@Test
@Test
public void headRequestReturnsEmpty() {
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200);
HttpResponse response = new MockHttpResponse(HEAD_REQUEST, 200);
assertNull(HttpResponseBodyDecoder.decodeByteArray(null, response, ADAPTER, decodeData));
}
@ParameterizedTest
@MethodSource("nonDecodableResponseSupplier")
public void nonDecodableResponse(HttpResponseDecodeData decodeData) {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200);
assertNull(HttpResponseBodyDecoder.decodeByteArray(null, response, ADAPTER, decodeData));
}
private static Stream<Arguments> nonDecodableResponseSupplier() {
HttpResponseDecodeData nullReturnType = new MockHttpResponseDecodeData(200, null, false);
ParameterizedType fluxByteBuffer = mockParameterizedType(Flux.class, ByteBuffer.class);
HttpResponseDecodeData fluxByteBufferReturnType = new MockHttpResponseDecodeData(200, fluxByteBuffer, false);
ParameterizedType monoByteArray = mockParameterizedType(Mono.class, byte[].class);
HttpResponseDecodeData monoByteArrayReturnType = new MockHttpResponseDecodeData(200, monoByteArray, false);
ParameterizedType voidTypeResponse = mockParameterizedType(ResponseBase.class, int.class, Void.TYPE);
HttpResponseDecodeData voidTypeResponseReturnType = new MockHttpResponseDecodeData(200, voidTypeResponse, false);
ParameterizedType voidClassResponse = mockParameterizedType(ResponseBase.class, int.class, void.class);
HttpResponseDecodeData voidClassResponseReturnType = new MockHttpResponseDecodeData(200, voidClassResponse,
false);
return Stream.of(
Arguments.of(nullReturnType),
Arguments.of(fluxByteBufferReturnType),
Arguments.of(monoByteArrayReturnType),
Arguments.of(voidTypeResponseReturnType),
Arguments.of(voidClassResponseReturnType)
);
}
@Test
public void emptyResponseReturnsMonoEmpty() {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, (Object) null);
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, String.class, true);
assertNull(HttpResponseBodyDecoder.decodeByteArray(null, response, ADAPTER, decodeData));
}
@ParameterizedTest
@MethodSource("decodableResponseSupplier")
public void decodableResponse(HttpResponse response, HttpResponseDecodeData decodeData, Object expected) {
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(bytes -> HttpResponseBodyDecoder.decodeByteArray(bytes, response, ADAPTER, decodeData)))
.assertNext(actual -> assertEquals(expected, actual))
.verifyComplete();
}
private static Stream<Arguments> decodableResponseSupplier() {
HttpResponseDecodeData stringDecodeData = new MockHttpResponseDecodeData(200, String.class, String.class, true);
HttpResponse stringResponse = new MockHttpResponse(GET_REQUEST, 200, "hello");
HttpResponseDecodeData offsetDateTimeDecodeData = new MockHttpResponseDecodeData(200, OffsetDateTime.class,
OffsetDateTime.class, true);
OffsetDateTime offsetDateTimeNow = OffsetDateTime.now(ZoneOffset.UTC);
HttpResponse offsetDateTimeResponse = new MockHttpResponse(GET_REQUEST, 200, offsetDateTimeNow);
HttpResponseDecodeData dateTimeRfc1123DecodeData = new MockHttpResponseDecodeData(200, OffsetDateTime.class,
DateTimeRfc1123.class, true);
DateTimeRfc1123 dateTimeRfc1123Now = new DateTimeRfc1123(offsetDateTimeNow);
HttpResponse dateTimeRfc1123Response = new MockHttpResponse(GET_REQUEST, 200, dateTimeRfc1123Now);
HttpResponseDecodeData unixTimeDecodeData = new MockHttpResponseDecodeData(200, OffsetDateTime.class,
OffsetDateTime.class, true);
HttpResponse unixTimeResponse = new MockHttpResponse(GET_REQUEST, 200, offsetDateTimeNow);
ParameterizedType stringList = mockParameterizedType(List.class, String.class);
HttpResponseDecodeData stringListDecodeData = new MockHttpResponseDecodeData(200, stringList, String.class, true);
List<String> list = Arrays.asList("hello", "azure");
HttpResponse stringListResponse = new MockHttpResponse(GET_REQUEST, 200, list);
ParameterizedType mapStringString = mockParameterizedType(Map.class, String.class, String.class);
HttpResponseDecodeData mapStringStringDecodeData = new MockHttpResponseDecodeData(200, mapStringString,
String.class, true);
Map<String, String> map = Collections.singletonMap("hello", "azure");
HttpResponse mapStringStringResponse = new MockHttpResponse(GET_REQUEST, 200, map);
return Stream.of(
Arguments.of(stringResponse, stringDecodeData, "hello"),
Arguments.of(offsetDateTimeResponse, offsetDateTimeDecodeData, offsetDateTimeNow),
Arguments.of(dateTimeRfc1123Response, dateTimeRfc1123DecodeData,
new DateTimeRfc1123(dateTimeRfc1123Now.toString()).getDateTime()),
Arguments.of(unixTimeResponse, unixTimeDecodeData, offsetDateTimeNow),
Arguments.of(stringListResponse, stringListDecodeData, list),
Arguments.of(mapStringStringResponse, mapStringStringDecodeData, map)
);
}
@Test
public void decodeListBase64UrlResponse() {
ParameterizedType parameterizedType = mockParameterizedType(List.class, byte[].class);
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, parameterizedType, Base64Url.class, true);
List<Base64Url> base64Urls = Arrays.asList(new Base64Url("base"), new Base64Url("64"));
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, base64Urls);
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, response, ADAPTER, decodeData)))
.assertNext(actual -> {
assertTrue(actual instanceof List);
@SuppressWarnings("unchecked") List<byte[]> decoded = (List<byte[]>) actual;
assertEquals(2, decoded.size());
assertArraysEqual(base64Urls.get(0).decodedBytes(), decoded.get(0));
assertArraysEqual(base64Urls.get(1).decodedBytes(), decoded.get(1));
}).verifyComplete();
}
@SuppressWarnings("unchecked")
@Test
public void decodePageResponse() {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, new Page<String>() {
@Override
public IterableStream<String> getElements() {
return IterableStream.of(null);
}
@Override
public String getContinuationToken() {
return null;
}
});
HttpResponseDecodeData pageDecodeData = new MockHttpResponseDecodeData(200, String.class, Page.class, true);
HttpResponseDecodeData itemPageDecodeData = new MockHttpResponseDecodeData(200, String.class, ItemPage.class,
true);
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, response, ADAPTER, pageDecodeData)))
.assertNext(actual -> {
assertTrue(actual instanceof Page);
Page<String> page = (Page<String>) actual;
assertFalse(page.getElements().iterator().hasNext());
assertNull(page.getContinuationToken());
}).verifyComplete();
StepVerifier.create(response.getBodyAsByteArray()
.mapNotNull(body -> HttpResponseBodyDecoder.decodeByteArray(body, response, ADAPTER,
itemPageDecodeData)))
.assertNext(actual -> {
assertTrue(actual instanceof Page);
Page<String> page = (Page<String>) actual;
assertFalse(page.getElements().iterator().hasNext());
assertNull(page.getContinuationToken());
}).verifyComplete();
}
@Test
public void malformedBodyReturnsError() {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, (Object) null);
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, String.class, String.class, true);
assertThrows(HttpResponseException.class, () -> HttpResponseBodyDecoder.decodeByteArray(
"malformed JSON string".getBytes(StandardCharsets.UTF_8), response, ADAPTER, decodeData));
}
@Test
public void ioExceptionReturnsError() throws IOException {
HttpResponse response = new MockHttpResponse(GET_REQUEST, 200, "valid JSON string");
HttpResponseDecodeData decodeData = new MockHttpResponseDecodeData(200, String.class, String.class, true);
SerializerAdapter serializer = new MockSerializerAdapter() {
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
throw new IOException();
}
};
assertThrows(HttpResponseException.class, () ->
HttpResponseBodyDecoder.decodeByteArray(new byte[0], response, serializer, decodeData));
}
@ParameterizedTest
@MethodSource("decodeTypeSupplier")
public void decodeType(HttpResponse response, HttpResponseDecodeData data, Type expected) {
assertEquals(expected, HttpResponseBodyDecoder.decodedType(response, data));
}
private static Stream<Arguments> decodeTypeSupplier() {
HttpResponse badResponse = new MockHttpResponse(GET_REQUEST, 400);
HttpResponse headResponse = new MockHttpResponse(HEAD_REQUEST, 200);
HttpResponse getResponse = new MockHttpResponse(GET_REQUEST, 200);
HttpResponseDecodeData badResponseData = new MockHttpResponseDecodeData(-1,
new UnexpectedExceptionInformation(HttpResponseException.class));
HttpResponseDecodeData nonDecodable = new MockHttpResponseDecodeData(200, void.class, false);
HttpResponseDecodeData stringReturn = new MockHttpResponseDecodeData(200, String.class, true);
ParameterizedType monoString = mockParameterizedType(Mono.class, String.class);
HttpResponseDecodeData monoStringReturn = new MockHttpResponseDecodeData(200, monoString, true);
ParameterizedType responseString = mockParameterizedType(Response.class, String.class);
HttpResponseDecodeData responseStringReturn = new MockHttpResponseDecodeData(200, responseString, true);
HttpResponseDecodeData headDecodeData = new MockHttpResponseDecodeData(200, null, false);
return Stream.of(
Arguments.of(badResponse, badResponseData, Object.class),
Arguments.of(headResponse, headDecodeData, null),
Arguments.of(getResponse, nonDecodable, null),
Arguments.of(getResponse, stringReturn, String.class),
Arguments.of(getResponse, monoStringReturn, String.class),
Arguments.of(getResponse, responseStringReturn, String.class)
);
}
private static ParameterizedType mockParameterizedType(Type rawType, Type... actualTypeArguments) {
return new ParameterizedType() {
@Override
public Type[] getActualTypeArguments() {
return actualTypeArguments;
}
@Override
public Type getRawType() {
return rawType;
}
@Override
public Type getOwnerType() {
return null;
}
};
}
private static final class MockUnexpectedExceptionInformation extends UnexpectedExceptionInformation {
private final Class<?> exceptionBodyType;
/**
* Creates an UnexpectedExceptionInformation object with the given exception type and expected response body.
*
* @param exceptionType Exception type to be thrown.
*/
MockUnexpectedExceptionInformation(Class<? extends HttpResponseException> exceptionType,
Class<?> exceptionBodyType) {
super(exceptionType);
this.exceptionBodyType = exceptionBodyType;
}
@Override
public Class<? extends HttpResponseException> getExceptionType() {
return super.getExceptionType();
}
@Override
public Class<?> getExceptionBodyType() {
return exceptionBodyType;
}
}
} |
I'd check with @JonathanGiles whether reflection is the best approach here. My concern is around portability to different Java environments and platforms. In this case, would it be better to have protected methods on the parent builder, so we can invoke them via super here to do this configuration? | public InteractiveBrowserBrokerCredentialBuilder setWindowHandle(long windowHandle) {
try {
Field field = CredentialBuilderBase.class.getDeclaredField("identityClientOptions");
field.setAccessible(true);
IdentityClientOptions options = (IdentityClientOptions) field.get(this);
options.setBrokerWindowHandle(windowHandle);
} catch (NoSuchFieldException | IllegalAccessException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return this;
} | field.setAccessible(true); | public InteractiveBrowserBrokerCredentialBuilder setWindowHandle(long windowHandle) {
CredentialBuilderBaseHelper.getClientOptions(this).setBrokerWindowHandle(windowHandle);
return this;
} | class InteractiveBrowserBrokerCredentialBuilder extends InteractiveBrowserCredentialBuilder {
private static final ClientLogger LOGGER = new ClientLogger(InteractiveBrowserBrokerCredentialBuilder.class);
/**
* Sets the parent window handle used by the broker. For use on Windows only.
*
* @param windowHandle The window handle of the current application, or 0 for a console application.
* @return An updated instance of this builder with the interactive browser broker configured.
*/
/**
* Enables Microsoft Account (MSA) pass-through. This allows the user to sign in with a Microsoft Account (MSA)
* instead of a work or school account.
*
* @return An updated instance of this builder with enable Legacy MSA Passthrough set to true.
*/
public InteractiveBrowserBrokerCredentialBuilder enableLegacyMsaPassthrough() {
Field field = null;
try {
field = CredentialBuilderBase.class.getDeclaredField("identityClientOptions");
field.setAccessible(true);
IdentityClientOptions options = (IdentityClientOptions) field.get(this);
options.setEnableLegacyMsaPassthrough(true);
} catch (NoSuchFieldException | IllegalAccessException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientOptions(ClientOptions clientOptions) {
super.clientOptions(clientOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder addPolicy(HttpPipelinePolicy policy) {
super.addPolicy(policy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder port(int port) {
super.port(port);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) {
super.authenticationRecord(authenticationRecord);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder browserCustomizationOptions(BrowserCustomizationOptions browserCustomizationOptions) {
super.browserCustomizationOptions(browserCustomizationOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientId(String clientId) {
super.clientId(clientId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder configuration(Configuration configuration) {
super.configuration(configuration);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authorityHost(String authorityHost) {
super.authorityHost(authorityHost);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableAutomaticAuthentication() {
super.disableAutomaticAuthentication();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableInstanceDiscovery() {
super.disableInstanceDiscovery();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableAccountIdentifierLogging() {
super.enableAccountIdentifierLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableUnsafeSupportLogging() {
super.enableUnsafeSupportLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder executorService(ExecutorService executorService) {
super.executorService(executorService);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpClient(HttpClient client) {
super.httpClient(client);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpLogOptions(HttpLogOptions logOptions) {
super.httpLogOptions(logOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder loginHint(String loginHint) {
super.loginHint(loginHint);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder pipeline(HttpPipeline pipeline) {
super.pipeline(pipeline);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder maxRetry(int maxRetry) {
super.maxRetry(maxRetry);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder redirectUrl(String redirectUrl) {
super.redirectUrl(redirectUrl);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryOptions(RetryOptions retryOptions) {
super.retryOptions(retryOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryPolicy(RetryPolicy retryPolicy) {
super.retryPolicy(retryPolicy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tenantId(String tenantId) {
super.tenantId(tenantId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryTimeout(Function<Duration, Duration> retryTimeout) {
super.retryTimeout(retryTimeout);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tokenCachePersistenceOptions(TokenCachePersistenceOptions tokenCachePersistenceOptions) {
super.tokenCachePersistenceOptions(tokenCachePersistenceOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpPipeline(HttpPipeline httpPipeline) {
super.httpPipeline(httpPipeline);
return this;
}
/**
* {@inheritDoc}
*/
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder proxyOptions(ProxyOptions proxyOptions) {
super.proxyOptions(proxyOptions);
return this;
}
} | class InteractiveBrowserBrokerCredentialBuilder extends InteractiveBrowserCredentialBuilder {
/**
* Sets the parent window handle used by the broker. For use on Windows only.
*
* @param windowHandle The window handle of the current application, or 0 for a console application.
* @return An updated instance of this builder with the interactive browser broker configured.
*/
/**
* Enables Microsoft Account (MSA) pass-through. This allows the user to sign in with a Microsoft Account (MSA)
* instead of a work or school account.
*
* @return An updated instance of this builder with enable Legacy MSA Passthrough set to true.
*/
public InteractiveBrowserBrokerCredentialBuilder enableLegacyMsaPassthrough() {
CredentialBuilderBaseHelper.getClientOptions(this).setEnableLegacyMsaPassthrough(true);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientOptions(ClientOptions clientOptions) {
super.clientOptions(clientOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder addPolicy(HttpPipelinePolicy policy) {
super.addPolicy(policy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder port(int port) {
super.port(port);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) {
super.authenticationRecord(authenticationRecord);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder browserCustomizationOptions(BrowserCustomizationOptions browserCustomizationOptions) {
super.browserCustomizationOptions(browserCustomizationOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientId(String clientId) {
super.clientId(clientId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder configuration(Configuration configuration) {
super.configuration(configuration);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authorityHost(String authorityHost) {
super.authorityHost(authorityHost);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableAutomaticAuthentication() {
super.disableAutomaticAuthentication();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableInstanceDiscovery() {
super.disableInstanceDiscovery();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableAccountIdentifierLogging() {
super.enableAccountIdentifierLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableUnsafeSupportLogging() {
super.enableUnsafeSupportLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder executorService(ExecutorService executorService) {
super.executorService(executorService);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpClient(HttpClient client) {
super.httpClient(client);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpLogOptions(HttpLogOptions logOptions) {
super.httpLogOptions(logOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder loginHint(String loginHint) {
super.loginHint(loginHint);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder pipeline(HttpPipeline pipeline) {
super.pipeline(pipeline);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder maxRetry(int maxRetry) {
super.maxRetry(maxRetry);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder redirectUrl(String redirectUrl) {
super.redirectUrl(redirectUrl);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryOptions(RetryOptions retryOptions) {
super.retryOptions(retryOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryPolicy(RetryPolicy retryPolicy) {
super.retryPolicy(retryPolicy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tenantId(String tenantId) {
super.tenantId(tenantId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryTimeout(Function<Duration, Duration> retryTimeout) {
super.retryTimeout(retryTimeout);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tokenCachePersistenceOptions(TokenCachePersistenceOptions tokenCachePersistenceOptions) {
super.tokenCachePersistenceOptions(tokenCachePersistenceOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpPipeline(HttpPipeline httpPipeline) {
super.httpPipeline(httpPipeline);
return this;
}
/**
* {@inheritDoc}
*/
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder proxyOptions(ProxyOptions proxyOptions) {
super.proxyOptions(proxyOptions);
return this;
}
} |
Changed approach. For posterity, getting rid of a protected field was the goal here. | public InteractiveBrowserBrokerCredentialBuilder setWindowHandle(long windowHandle) {
try {
Field field = CredentialBuilderBase.class.getDeclaredField("identityClientOptions");
field.setAccessible(true);
IdentityClientOptions options = (IdentityClientOptions) field.get(this);
options.setBrokerWindowHandle(windowHandle);
} catch (NoSuchFieldException | IllegalAccessException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return this;
} | field.setAccessible(true); | public InteractiveBrowserBrokerCredentialBuilder setWindowHandle(long windowHandle) {
CredentialBuilderBaseHelper.getClientOptions(this).setBrokerWindowHandle(windowHandle);
return this;
} | class InteractiveBrowserBrokerCredentialBuilder extends InteractiveBrowserCredentialBuilder {
private static final ClientLogger LOGGER = new ClientLogger(InteractiveBrowserBrokerCredentialBuilder.class);
/**
* Sets the parent window handle used by the broker. For use on Windows only.
*
* @param windowHandle The window handle of the current application, or 0 for a console application.
* @return An updated instance of this builder with the interactive browser broker configured.
*/
/**
* Enables Microsoft Account (MSA) pass-through. This allows the user to sign in with a Microsoft Account (MSA)
* instead of a work or school account.
*
* @return An updated instance of this builder with enable Legacy MSA Passthrough set to true.
*/
public InteractiveBrowserBrokerCredentialBuilder enableLegacyMsaPassthrough() {
Field field = null;
try {
field = CredentialBuilderBase.class.getDeclaredField("identityClientOptions");
field.setAccessible(true);
IdentityClientOptions options = (IdentityClientOptions) field.get(this);
options.setEnableLegacyMsaPassthrough(true);
} catch (NoSuchFieldException | IllegalAccessException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientOptions(ClientOptions clientOptions) {
super.clientOptions(clientOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder addPolicy(HttpPipelinePolicy policy) {
super.addPolicy(policy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder port(int port) {
super.port(port);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) {
super.authenticationRecord(authenticationRecord);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder browserCustomizationOptions(BrowserCustomizationOptions browserCustomizationOptions) {
super.browserCustomizationOptions(browserCustomizationOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientId(String clientId) {
super.clientId(clientId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder configuration(Configuration configuration) {
super.configuration(configuration);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authorityHost(String authorityHost) {
super.authorityHost(authorityHost);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableAutomaticAuthentication() {
super.disableAutomaticAuthentication();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableInstanceDiscovery() {
super.disableInstanceDiscovery();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableAccountIdentifierLogging() {
super.enableAccountIdentifierLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableUnsafeSupportLogging() {
super.enableUnsafeSupportLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder executorService(ExecutorService executorService) {
super.executorService(executorService);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpClient(HttpClient client) {
super.httpClient(client);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpLogOptions(HttpLogOptions logOptions) {
super.httpLogOptions(logOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder loginHint(String loginHint) {
super.loginHint(loginHint);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder pipeline(HttpPipeline pipeline) {
super.pipeline(pipeline);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder maxRetry(int maxRetry) {
super.maxRetry(maxRetry);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder redirectUrl(String redirectUrl) {
super.redirectUrl(redirectUrl);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryOptions(RetryOptions retryOptions) {
super.retryOptions(retryOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryPolicy(RetryPolicy retryPolicy) {
super.retryPolicy(retryPolicy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tenantId(String tenantId) {
super.tenantId(tenantId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryTimeout(Function<Duration, Duration> retryTimeout) {
super.retryTimeout(retryTimeout);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tokenCachePersistenceOptions(TokenCachePersistenceOptions tokenCachePersistenceOptions) {
super.tokenCachePersistenceOptions(tokenCachePersistenceOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpPipeline(HttpPipeline httpPipeline) {
super.httpPipeline(httpPipeline);
return this;
}
/**
* {@inheritDoc}
*/
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder proxyOptions(ProxyOptions proxyOptions) {
super.proxyOptions(proxyOptions);
return this;
}
} | class InteractiveBrowserBrokerCredentialBuilder extends InteractiveBrowserCredentialBuilder {
/**
* Sets the parent window handle used by the broker. For use on Windows only.
*
* @param windowHandle The window handle of the current application, or 0 for a console application.
* @return An updated instance of this builder with the interactive browser broker configured.
*/
/**
* Enables Microsoft Account (MSA) pass-through. This allows the user to sign in with a Microsoft Account (MSA)
* instead of a work or school account.
*
* @return An updated instance of this builder with enable Legacy MSA Passthrough set to true.
*/
public InteractiveBrowserBrokerCredentialBuilder enableLegacyMsaPassthrough() {
CredentialBuilderBaseHelper.getClientOptions(this).setEnableLegacyMsaPassthrough(true);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientOptions(ClientOptions clientOptions) {
super.clientOptions(clientOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder addPolicy(HttpPipelinePolicy policy) {
super.addPolicy(policy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder port(int port) {
super.port(port);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(String... additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authenticationRecord(AuthenticationRecord authenticationRecord) {
super.authenticationRecord(authenticationRecord);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder browserCustomizationOptions(BrowserCustomizationOptions browserCustomizationOptions) {
super.browserCustomizationOptions(browserCustomizationOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder clientId(String clientId) {
super.clientId(clientId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder configuration(Configuration configuration) {
super.configuration(configuration);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder authorityHost(String authorityHost) {
super.authorityHost(authorityHost);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableAutomaticAuthentication() {
super.disableAutomaticAuthentication();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder disableInstanceDiscovery() {
super.disableInstanceDiscovery();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableAccountIdentifierLogging() {
super.enableAccountIdentifierLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder enableUnsafeSupportLogging() {
super.enableUnsafeSupportLogging();
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder executorService(ExecutorService executorService) {
super.executorService(executorService);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpClient(HttpClient client) {
super.httpClient(client);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpLogOptions(HttpLogOptions logOptions) {
super.httpLogOptions(logOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder loginHint(String loginHint) {
super.loginHint(loginHint);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder pipeline(HttpPipeline pipeline) {
super.pipeline(pipeline);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder maxRetry(int maxRetry) {
super.maxRetry(maxRetry);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder redirectUrl(String redirectUrl) {
super.redirectUrl(redirectUrl);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder additionallyAllowedTenants(List<String> additionallyAllowedTenants) {
super.additionallyAllowedTenants(additionallyAllowedTenants);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryOptions(RetryOptions retryOptions) {
super.retryOptions(retryOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryPolicy(RetryPolicy retryPolicy) {
super.retryPolicy(retryPolicy);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tenantId(String tenantId) {
super.tenantId(tenantId);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder retryTimeout(Function<Duration, Duration> retryTimeout) {
super.retryTimeout(retryTimeout);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder tokenCachePersistenceOptions(TokenCachePersistenceOptions tokenCachePersistenceOptions) {
super.tokenCachePersistenceOptions(tokenCachePersistenceOptions);
return this;
}
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder httpPipeline(HttpPipeline httpPipeline) {
super.httpPipeline(httpPipeline);
return this;
}
/**
* {@inheritDoc}
*/
/**
* {@inheritDoc}
*/
@Override
public InteractiveBrowserBrokerCredentialBuilder proxyOptions(ProxyOptions proxyOptions) {
super.proxyOptions(proxyOptions);
return this;
}
} |
This will throw NullPointerException if recipient is null for instance, right? | void verifyRecipientEmailAddressesNotNull(List<EmailAddress> recipients) {
if (recipients != null) {
for (EmailAddress recipient : recipients) {
Objects.requireNonNull(recipient, "recipient 'EmailAddress' cannot be null.");
Objects.requireNonNull(recipient.getAddress(), "EmailAddress 'address' cannot be null.");
}
}
} | Objects.requireNonNull(recipient.getAddress(), "EmailAddress 'address' cannot be null."); | void verifyRecipientEmailAddressesNotNull(List<EmailAddress> recipients) {
if (recipients != null) {
for (EmailAddress recipient : recipients) {
Objects.requireNonNull(recipient, "recipient 'EmailAddress' cannot be null.");
Objects.requireNonNull(recipient.getAddress(), "EmailAddress 'address' cannot be null.");
}
}
} | class EmailAsyncClient {
private final EmailsImpl emailServiceClient;
private final AzureCommunicationEmailServiceImpl serviceClient;
private static final ClientLogger LOGGER = new ClientLogger(EmailAsyncClient.class);
/**
* Initializes an instance of EmailAsyncClient class.
*
* @param serviceClient the service client implementation.
*/
EmailAsyncClient(AzureCommunicationEmailServiceImpl serviceClient) {
this.serviceClient = serviceClient;
this.emailServiceClient = serviceClient.getEmails();
}
/**
* Queues an email message to be sent to one or more recipients.
*
* @param message Message payload for sending an email.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the {@link PollerFlux} for polling of status of the long running operation.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message) {
return beginSend(message, null);
}
PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message, Context context) {
Objects.requireNonNull(message, "'message' cannot be null.");
Objects.requireNonNull(message.getSenderAddress(), "'senderAddress' cannot be null.");
Objects.requireNonNull(message.getSubject(), "'subject' cannot be null.");
if (message.getBodyHtml() == null && message.getBodyPlainText() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException("'bodyHtml' and 'bodyPlainText' cannot both be null."));
}
if (message.getToRecipients() == null && message.getCcRecipients() == null
&& message.getBccRecipients() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException(
"'toRecipients', 'ccRecipients', and 'bccRecipients' cannot all be null.")
);
}
verifyRecipientEmailAddressesNotNull(message.getToRecipients());
verifyRecipientEmailAddressesNotNull(message.getCcRecipients());
verifyRecipientEmailAddressesNotNull(message.getBccRecipients());
EmailContent content = new EmailContent(message.getSubject())
.setHtml(message.getBodyHtml())
.setPlainText(message.getBodyPlainText());
EmailRecipients recipients = new EmailRecipients()
.setTo(message.getToRecipients())
.setCc(message.getCcRecipients())
.setBCC(message.getBccRecipients());
List<com.azure.communication.email.implementation.models.EmailAttachment> attachmentsImpl = null;
if (message.getAttachments() != null) {
attachmentsImpl = new ArrayList<>();
for (EmailAttachment attachment: message.getAttachments()) {
attachmentsImpl.add(new com.azure.communication.email.implementation.models.EmailAttachment(
attachment.getName(),
attachment.getContentType(),
Base64.getEncoder().encodeToString(attachment.getContent().toBytes())
));
}
}
com.azure.communication.email.implementation.models.EmailMessage messageImpl
= new com.azure.communication.email.implementation.models.EmailMessage(
message.getSenderAddress(), content, recipients);
messageImpl
.setHeaders(message.getHeaders())
.setAttachments(attachmentsImpl)
.setReplyTo(message.getReplyTo())
.setUserEngagementTrackingDisabled(message.isUserEngagementTrackingDisabled());
return PollerFlux.create(
Duration.ofSeconds(1),
() -> emailServiceClient.sendWithResponseAsync(messageImpl, null, null, context),
new DefaultPollingStrategy<>(
this.serviceClient.getHttpPipeline(),
"{endpoint}".replace("{endpoint}", this.serviceClient.getEndpoint()),
null,
context),
TypeReference.createInstance(EmailSendResult.class),
TypeReference.createInstance(EmailSendResult.class));
}
} | class EmailAsyncClient {
private final EmailsImpl emailServiceClient;
private final AzureCommunicationEmailServiceImpl serviceClient;
private static final ClientLogger LOGGER = new ClientLogger(EmailAsyncClient.class);
/**
* Initializes an instance of EmailAsyncClient class.
*
* @param serviceClient the service client implementation.
*/
EmailAsyncClient(AzureCommunicationEmailServiceImpl serviceClient) {
this.serviceClient = serviceClient;
this.emailServiceClient = serviceClient.getEmails();
}
/**
* Queues an email message to be sent to one or more recipients.
*
* @param message Message payload for sending an email.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the {@link PollerFlux} for polling of status of the long running operation.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message) {
return beginSend(message, null);
}
PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message, Context context) {
Objects.requireNonNull(message, "'message' cannot be null.");
Objects.requireNonNull(message.getSenderAddress(), "'senderAddress' cannot be null.");
Objects.requireNonNull(message.getSubject(), "'subject' cannot be null.");
if (message.getBodyHtml() == null && message.getBodyPlainText() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException("'bodyHtml' and 'bodyPlainText' cannot both be null."));
}
if (message.getToRecipients() == null && message.getCcRecipients() == null
&& message.getBccRecipients() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException(
"'toRecipients', 'ccRecipients', and 'bccRecipients' cannot all be null.")
);
}
verifyRecipientEmailAddressesNotNull(message.getToRecipients());
verifyRecipientEmailAddressesNotNull(message.getCcRecipients());
verifyRecipientEmailAddressesNotNull(message.getBccRecipients());
EmailContent content = new EmailContent(message.getSubject())
.setHtml(message.getBodyHtml())
.setPlainText(message.getBodyPlainText());
EmailRecipients recipients = new EmailRecipients()
.setTo(message.getToRecipients())
.setCc(message.getCcRecipients())
.setBCC(message.getBccRecipients());
List<com.azure.communication.email.implementation.models.EmailAttachment> attachmentsImpl = null;
if (message.getAttachments() != null) {
attachmentsImpl = new ArrayList<>();
for (EmailAttachment attachment: message.getAttachments()) {
attachmentsImpl.add(new com.azure.communication.email.implementation.models.EmailAttachment(
attachment.getName(),
attachment.getContentType(),
Base64.getEncoder().encodeToString(attachment.getContent().toBytes())
));
}
}
com.azure.communication.email.implementation.models.EmailMessage messageImpl
= new com.azure.communication.email.implementation.models.EmailMessage(
message.getSenderAddress(), content, recipients);
messageImpl
.setHeaders(message.getHeaders())
.setAttachments(attachmentsImpl)
.setReplyTo(message.getReplyTo())
.setUserEngagementTrackingDisabled(message.isUserEngagementTrackingDisabled());
return PollerFlux.create(
Duration.ofSeconds(1),
() -> emailServiceClient.sendWithResponseAsync(messageImpl, null, null, context),
new DefaultPollingStrategy<>(
this.serviceClient.getHttpPipeline(),
"{endpoint}".replace("{endpoint}", this.serviceClient.getEndpoint()),
null,
context),
TypeReference.createInstance(EmailSendResult.class),
TypeReference.createInstance(EmailSendResult.class));
}
} |
Yup, that's correct. | void verifyRecipientEmailAddressesNotNull(List<EmailAddress> recipients) {
if (recipients != null) {
for (EmailAddress recipient : recipients) {
Objects.requireNonNull(recipient, "recipient 'EmailAddress' cannot be null.");
Objects.requireNonNull(recipient.getAddress(), "EmailAddress 'address' cannot be null.");
}
}
} | Objects.requireNonNull(recipient.getAddress(), "EmailAddress 'address' cannot be null."); | void verifyRecipientEmailAddressesNotNull(List<EmailAddress> recipients) {
if (recipients != null) {
for (EmailAddress recipient : recipients) {
Objects.requireNonNull(recipient, "recipient 'EmailAddress' cannot be null.");
Objects.requireNonNull(recipient.getAddress(), "EmailAddress 'address' cannot be null.");
}
}
} | class EmailAsyncClient {
private final EmailsImpl emailServiceClient;
private final AzureCommunicationEmailServiceImpl serviceClient;
private static final ClientLogger LOGGER = new ClientLogger(EmailAsyncClient.class);
/**
* Initializes an instance of EmailAsyncClient class.
*
* @param serviceClient the service client implementation.
*/
EmailAsyncClient(AzureCommunicationEmailServiceImpl serviceClient) {
this.serviceClient = serviceClient;
this.emailServiceClient = serviceClient.getEmails();
}
/**
* Queues an email message to be sent to one or more recipients.
*
* @param message Message payload for sending an email.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the {@link PollerFlux} for polling of status of the long running operation.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message) {
return beginSend(message, null);
}
PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message, Context context) {
Objects.requireNonNull(message, "'message' cannot be null.");
Objects.requireNonNull(message.getSenderAddress(), "'senderAddress' cannot be null.");
Objects.requireNonNull(message.getSubject(), "'subject' cannot be null.");
if (message.getBodyHtml() == null && message.getBodyPlainText() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException("'bodyHtml' and 'bodyPlainText' cannot both be null."));
}
if (message.getToRecipients() == null && message.getCcRecipients() == null
&& message.getBccRecipients() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException(
"'toRecipients', 'ccRecipients', and 'bccRecipients' cannot all be null.")
);
}
verifyRecipientEmailAddressesNotNull(message.getToRecipients());
verifyRecipientEmailAddressesNotNull(message.getCcRecipients());
verifyRecipientEmailAddressesNotNull(message.getBccRecipients());
EmailContent content = new EmailContent(message.getSubject())
.setHtml(message.getBodyHtml())
.setPlainText(message.getBodyPlainText());
EmailRecipients recipients = new EmailRecipients()
.setTo(message.getToRecipients())
.setCc(message.getCcRecipients())
.setBCC(message.getBccRecipients());
List<com.azure.communication.email.implementation.models.EmailAttachment> attachmentsImpl = null;
if (message.getAttachments() != null) {
attachmentsImpl = new ArrayList<>();
for (EmailAttachment attachment: message.getAttachments()) {
attachmentsImpl.add(new com.azure.communication.email.implementation.models.EmailAttachment(
attachment.getName(),
attachment.getContentType(),
Base64.getEncoder().encodeToString(attachment.getContent().toBytes())
));
}
}
com.azure.communication.email.implementation.models.EmailMessage messageImpl
= new com.azure.communication.email.implementation.models.EmailMessage(
message.getSenderAddress(), content, recipients);
messageImpl
.setHeaders(message.getHeaders())
.setAttachments(attachmentsImpl)
.setReplyTo(message.getReplyTo())
.setUserEngagementTrackingDisabled(message.isUserEngagementTrackingDisabled());
return PollerFlux.create(
Duration.ofSeconds(1),
() -> emailServiceClient.sendWithResponseAsync(messageImpl, null, null, context),
new DefaultPollingStrategy<>(
this.serviceClient.getHttpPipeline(),
"{endpoint}".replace("{endpoint}", this.serviceClient.getEndpoint()),
null,
context),
TypeReference.createInstance(EmailSendResult.class),
TypeReference.createInstance(EmailSendResult.class));
}
} | class EmailAsyncClient {
private final EmailsImpl emailServiceClient;
private final AzureCommunicationEmailServiceImpl serviceClient;
private static final ClientLogger LOGGER = new ClientLogger(EmailAsyncClient.class);
/**
* Initializes an instance of EmailAsyncClient class.
*
* @param serviceClient the service client implementation.
*/
EmailAsyncClient(AzureCommunicationEmailServiceImpl serviceClient) {
this.serviceClient = serviceClient;
this.emailServiceClient = serviceClient.getEmails();
}
/**
* Queues an email message to be sent to one or more recipients.
*
* @param message Message payload for sending an email.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the {@link PollerFlux} for polling of status of the long running operation.
*/
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
public PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message) {
return beginSend(message, null);
}
PollerFlux<EmailSendResult, EmailSendResult> beginSend(EmailMessage message, Context context) {
Objects.requireNonNull(message, "'message' cannot be null.");
Objects.requireNonNull(message.getSenderAddress(), "'senderAddress' cannot be null.");
Objects.requireNonNull(message.getSubject(), "'subject' cannot be null.");
if (message.getBodyHtml() == null && message.getBodyPlainText() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException("'bodyHtml' and 'bodyPlainText' cannot both be null."));
}
if (message.getToRecipients() == null && message.getCcRecipients() == null
&& message.getBccRecipients() == null) {
throw LOGGER.logExceptionAsError(
new NullPointerException(
"'toRecipients', 'ccRecipients', and 'bccRecipients' cannot all be null.")
);
}
verifyRecipientEmailAddressesNotNull(message.getToRecipients());
verifyRecipientEmailAddressesNotNull(message.getCcRecipients());
verifyRecipientEmailAddressesNotNull(message.getBccRecipients());
EmailContent content = new EmailContent(message.getSubject())
.setHtml(message.getBodyHtml())
.setPlainText(message.getBodyPlainText());
EmailRecipients recipients = new EmailRecipients()
.setTo(message.getToRecipients())
.setCc(message.getCcRecipients())
.setBCC(message.getBccRecipients());
List<com.azure.communication.email.implementation.models.EmailAttachment> attachmentsImpl = null;
if (message.getAttachments() != null) {
attachmentsImpl = new ArrayList<>();
for (EmailAttachment attachment: message.getAttachments()) {
attachmentsImpl.add(new com.azure.communication.email.implementation.models.EmailAttachment(
attachment.getName(),
attachment.getContentType(),
Base64.getEncoder().encodeToString(attachment.getContent().toBytes())
));
}
}
com.azure.communication.email.implementation.models.EmailMessage messageImpl
= new com.azure.communication.email.implementation.models.EmailMessage(
message.getSenderAddress(), content, recipients);
messageImpl
.setHeaders(message.getHeaders())
.setAttachments(attachmentsImpl)
.setReplyTo(message.getReplyTo())
.setUserEngagementTrackingDisabled(message.isUserEngagementTrackingDisabled());
return PollerFlux.create(
Duration.ofSeconds(1),
() -> emailServiceClient.sendWithResponseAsync(messageImpl, null, null, context),
new DefaultPollingStrategy<>(
this.serviceClient.getHttpPipeline(),
"{endpoint}".replace("{endpoint}", this.serviceClient.getEndpoint()),
null,
context),
TypeReference.createInstance(EmailSendResult.class),
TypeReference.createInstance(EmailSendResult.class));
}
} |
we don't collect non-essential statsbeat in the standalone exporter. getNonessentialStatsbeat() will return null. | private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", false, " (telemetry will be lost)");
} else {
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
true,
" (telemetry will be stored to disk and retried)"),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
statsbeatModule.getNonessentialStatsbeat(),
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | statsbeatModule.getNonessentialStatsbeat(), | private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", false, " (telemetry will be lost)");
} else {
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
true,
" (telemetry will be stored to disk and retried)"),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
    // One-time initialization; later calls (and later setter calls) are rejected
    // by the 'frozen' flag so all exporters share the same pipeline/exporter.
    if (frozen) {
        return;
    }
    builtHttpPipeline = createHttpPipeline();
    StatsbeatModule module = initStatsbeatModule(configProperties);
    // The item exporter must exist before the statsbeat module is started,
    // since statsbeat sends its telemetry through it.
    builtTelemetryItemExporter = createTelemetryItemExporter(module);
    startStatsbeatModule(module, configProperties);
    frozen = true;
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
    // Map span data with the shared defaults and ship it through the frozen exporter.
    SpanDataMapper mapper = createSpanDataMapper(configProperties);
    return new AzureMonitorTraceExporter(mapper, builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
    // Side effect: creating a metric exporter also starts the 15-minute heartbeat.
    HeartbeatExporter.start(
        MINUTES.toSeconds(15),
        createDefaultsPopulator(configProperties),
        builtTelemetryItemExporter::send);
    MetricDataMapper mapper =
        new MetricDataMapper(createDefaultsPopulator(configProperties), true);
    return new AzureMonitorMetricExporter(mapper, builtTelemetryItemExporter);
}
// Statsbeat feature flags reported by this exporter; currently none are enabled.
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
// Derives the statsbeat (self-diagnostics) connection string from the customer
// connection string; the two null arguments leave instrumentation key and
// endpoint overrides unset. NOTE(review): uses the 'connectionString' field
// directly, so this only works after connectionString(String) was called or the
// field was otherwise populated — confirm against internalBuildAndFreeze ordering.
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
    // Map log records with the shared defaults and ship them through the frozen exporter.
    LogDataMapper mapper =
        new LogDataMapper(true, false, createDefaultsPopulator(configProperties));
    return new AzureMonitorLogRecordExporter(mapper, builtTelemetryItemExporter);
}
// Creates the span-to-telemetry mapper. The two lambda arguments always return
// false, i.e. no span events are singled out for special handling by the mapper.
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
/**
 * Returns a consumer that stamps every telemetry item with the defaults shared by
 * all exporters: connection string, OTel resource, SDK-version tag, and the role
 * name/instance derived from the resource.
 */
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
    // Resolve once, outside the lambda, so every item reuses the same value and
    // resolution failures surface eagerly. Renamed from 'connectionString' to
    // avoid shadowing the field of the same name.
    ConnectionString resolvedConnectionString = getConnectionString(configProperties);
    return (builder, resource) -> {
        builder.setConnectionString(resolvedConnectionString);
        builder.setResource(resource);
        builder.addTag(
            ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
        ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
    };
}
/**
 * Resolves the connection string: the value set programmatically via
 * {@code connectionString(String)} wins; otherwise the
 * APPLICATIONINSIGHTS_CONNECTION_STRING configuration entry is parsed.
 *
 * @throws NullPointerException if neither source yields a connection string.
 */
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    if (connectionString != null) {
        return connectionString;
    }
    // Renamed local from 'connectionString' to avoid shadowing the field checked above.
    ConnectionString parsed =
        ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
    return Objects.requireNonNull(parsed, "'connectionString' cannot be null");
}
// Builds the HTTP pipeline used for ingestion requests.
// A user-supplied pipeline is used verbatim; in that case every other
// HTTP-related option must be unset, since they could not be honored.
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
// Otherwise assemble the default pipeline. Policy order matters:
// user agent, cookies, optional AAD auth, user policies, then logging last.
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
// AAD authentication is only added when a credential was configured.
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
// Creates the statsbeat (self-diagnostics) module. The configProperties
// parameter is currently unused here; configuration is applied later in
// startStatsbeatModule.
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
// Starts statsbeat reporting through the already-built telemetry item exporter.
// Short/long reporting intervals default to 15 minutes and 1 day but can be
// overridden via the STATSBEAT_*_INTERVAL_SECONDS properties.
// NOTE(review): the two positional 'false' arguments are undocumented flags —
// confirm their meaning against the StatsbeatModule.start signature.
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} |
Ah, thanks — I'm still not clear on why we don't collect the same data in both places, but I'll revert this change for this PR and we can revisit it later. | private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
// Wires the telemetry pipeline to a listener and wraps both in an item exporter.
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
// Temp dir used for offline persistence of telemetry during network outages.
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
// No writable temp dir: diagnostics only, failed telemetry is dropped.
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", false, " (telemetry will be lost)");
} else {
// Temp dir available: diagnostics plus local-storage retry (capacity 50).
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
true,
" (telemetry will be stored to disk and retried)"),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
// NOTE(review): this variant records local-storage stats via the
// nonessential statsbeat; a later revision of this method passes
// LocalStorageStats.noop() instead — confirm which is intended.
statsbeatModule.getNonessentialStatsbeat(),
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | statsbeatModule.getNonessentialStatsbeat(), | private TelemetryItemExporter createTelemetryItemExporter(StatsbeatModule statsbeatModule) {
// Wires the telemetry pipeline to a listener and wraps both in an item exporter.
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
// Temp dir used for offline persistence of telemetry during network outages.
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryPipelineListener telemetryPipelineListener;
if (tempDir == null) {
// No writable temp dir: diagnostics only, failed telemetry is dropped.
telemetryPipelineListener =
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service", false, " (telemetry will be lost)");
} else {
// Temp dir available: diagnostics plus local-storage retry (capacity 50).
telemetryPipelineListener =
TelemetryPipelineListener.composite(
new DiagnosticTelemetryPipelineListener(
"Sending telemetry to the ingestion service",
true,
" (telemetry will be stored to disk and retried)"),
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
// Local-storage statistics are deliberately not collected here
// (reverted per review; see PR discussion above this method).
LocalStorageStats.noop(),
false));
}
return new TelemetryItemExporter(telemetryPipeline, telemetryPipelineListener);
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
// One-time initialization shared by every exporter built from this builder.
// After the first call, 'frozen' is set and all fluent setters throw.
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
// NOTE(review): the freshly built pipeline ('builtHttpPipeline') is not
// passed to createTelemetryItemExporter here — verify that method does not
// read the user-populated 'httpPipeline' field (which may be null) instead
// of the pipeline just built above; consider passing it explicitly.
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
// Builds the span exporter backed by the already-frozen telemetry item exporter.
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
SpanDataMapper mapper = createSpanDataMapper(configProperties);
return new AzureMonitorTraceExporter(mapper, builtTelemetryItemExporter);
}
// Builds the metric exporter and starts the 15-minute heartbeat alongside it.
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
BiConsumer<AbstractTelemetryBuilder, Resource> heartbeatDefaults = createDefaultsPopulator(configProperties);
HeartbeatExporter.start(MINUTES.toSeconds(15), heartbeatDefaults, builtTelemetryItemExporter::send);
MetricDataMapper mapper = new MetricDataMapper(createDefaultsPopulator(configProperties), true);
return new AzureMonitorMetricExporter(mapper, builtTelemetryItemExporter);
}
// Statsbeat feature flags advertised by this exporter; currently none.
private Set<Feature> initStatsbeatFeatures() {
Set<Feature> features = Collections.emptySet();
return features;
}
// Derives the statsbeat (SDK self-diagnostics) connection string from the
// customer connection string; no instrumentation-key or endpoint overrides.
private StatsbeatConnectionString getStatsbeatConnectionString() {
StatsbeatConnectionString statsbeat = StatsbeatConnectionString.create(connectionString, null, null);
return statsbeat;
}
// Builds the log-record exporter backed by the already-frozen telemetry item exporter.
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
LogDataMapper mapper = new LogDataMapper(true, false, createDefaultsPopulator(configProperties));
return new AzureMonitorLogRecordExporter(mapper, builtTelemetryItemExporter);
}
// Creates the span-to-telemetry mapper. Both predicate arguments return false:
// no span events are captured as exceptions and no (span, event) pairs are
// suppressed.
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
/**
 * Creates a populator that stamps every telemetry item with the resolved
 * connection string, the originating resource, the SDK version tag, and the
 * role name/instance derived from the resource.
 *
 * @param configProperties used to resolve the connection string and role attributes.
 * @return a consumer applied to each telemetry builder before export.
 */
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
// Resolve eagerly, outside the lambda: fails fast when no connection string is
// configured and avoids re-resolving per telemetry item. Renamed so the local
// no longer shadows the 'connectionString' field.
ConnectionString resolvedConnectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(resolvedConnectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
/**
 * Returns the connection string set on this builder, falling back to the
 * {@code APPLICATIONINSIGHTS_CONNECTION_STRING} configuration property.
 *
 * @param configProperties source of the fallback connection string.
 * @throws NullPointerException if neither source yields a connection string.
 */
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
// Renamed so the local no longer shadows the 'connectionString' field checked above.
ConnectionString parsed = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(parsed, "'connectionString' cannot be null");
}
// Returns the user-supplied pipeline verbatim (after rejecting every setting
// that would silently be ignored alongside it), or assembles a default pipeline.
// Policy order matters: user-agent, cookie, optional AAD auth, user policies,
// then logging last so it observes the final request.
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
// A custom pipeline is all-or-nothing: fail loudly on conflicting settings
// rather than ignoring them.
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
// Client name/version come from the generated properties file; fall back to
// placeholders if it is missing from the jar.
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
// AAD authentication is only attached when a credential was supplied.
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
// Creates the statsbeat module. 'configProperties' is currently unused here but
// kept for signature symmetry with startStatsbeatModule.
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
// Starts statsbeat reporting through the frozen telemetry item exporter, with
// short/long reporting intervals overridable via configuration properties.
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
ConnectionString customerConnectionString = getConnectionString(configProperties);
long shortIntervalSeconds =
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15));
long longIntervalSeconds =
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1));
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
customerConnectionString::getInstrumentationKey,
false,
shortIntervalSeconds,
longIntervalSeconds,
false,
initStatsbeatFeatures());
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private HttpPipeline builtHttpPipeline;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
// Setters are rejected once any build method has frozen the builder.
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
// Parse eagerly so an invalid string fails here rather than at export time.
ConnectionString parsed = ConnectionString.parse(connectionString);
this.connectionString = parsed;
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
// Standalone build path: freeze the shared state against an empty configuration.
ConfigProperties emptyConfiguration = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(emptyConfiguration);
return buildTraceExporter(emptyConfiguration);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
// Standalone build path: freeze the shared state against an empty configuration.
ConfigProperties emptyConfiguration = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(emptyConfiguration);
return buildMetricExporter(emptyConfiguration);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
// Standalone build path: freeze the shared state against an empty configuration.
ConfigProperties emptyConfiguration = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(emptyConfiguration);
return buildLogRecordExporter(emptyConfiguration);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
builtHttpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} |
the problem was that the `httpPipeline` referenced on this (unchanged) line used to be the (user-populated) field instead of the `HttpPipeline` that was just built | private static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
} | TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule); | private static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(httpPipeline, statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(httpPipeline, statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} |
too bad our test didn't catch it. | private static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
} | TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule); | private static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(httpPipeline, statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
/**
 * Creates the log record exporter backed by the already-built telemetry item exporter.
 * {@link #internalBuildAndFreeze} must have run before this is called.
 */
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
    LogDataMapper logDataMapper =
        new LogDataMapper(true, false, createDefaultsPopulator(configProperties));
    return new AzureMonitorLogRecordExporter(logDataMapper, builtTelemetryItemExporter);
}
/**
 * Builds the mapper that converts OpenTelemetry span data into telemetry items.
 *
 * <p>NOTE(review): both lambda arguments always return {@code false}; they look like
 * predicates for special-casing events/spans that this exporter does not use — confirm
 * their meaning against {@code SpanDataMapper}'s constructor javadoc.
 */
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
/**
 * Creates the callback that stamps every outgoing telemetry item with the connection
 * string, the OpenTelemetry resource, and the SDK version tag.
 *
 * <p>The connection string is resolved eagerly, before the lambda is created, so a missing
 * connection string fails fast here rather than on the first export.
 */
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
// Role name/instance are derived from the resource attributes and configuration.
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
/**
 * Resolves the connection string: an explicitly configured one wins; otherwise it is parsed
 * from the {@code APPLICATIONINSIGHTS_CONNECTION_STRING} configuration property.
 *
 * @param configProperties the SDK configuration to fall back on.
 * @return the resolved connection string, never {@code null}.
 * @throws NullPointerException if no connection string is configured anywhere.
 */
private ConnectionString getConnectionString(ConfigProperties configProperties) {
    if (connectionString != null) {
        return connectionString;
    }
    // Renamed local (was "connectionString") to stop shadowing the field of the same name.
    ConnectionString parsed =
        ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
    return Objects.requireNonNull(parsed, "'connectionString' cannot be null");
}
/**
 * Returns the HTTP pipeline used to talk to the ingestion endpoint.
 *
 * <p>If a custom {@code httpPipeline} was supplied, it is returned as-is after verifying
 * that no conflicting HTTP-related settings were also supplied. Otherwise a default
 * pipeline is assembled from the configured client, credential, log options and policies.
 *
 * @throws IllegalStateException if a custom pipeline is combined with any other HTTP setting.
 */
private HttpPipeline createHttpPipeline() {
    if (httpPipeline != null) {
        validateNoSettingsConflictWithCustomPipeline();
        return httpPipeline;
    }
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
    String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
    policies.add(new CookiePolicy());
    if (credential != null) {
        // AAD authentication against the ingestion endpoint.
        policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
    }
    policies.addAll(httpPipelinePolicies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    return new com.azure.core.http.HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .tracer(new NoopTracer())
        .build();
}

/** Rejects combining a custom {@code httpPipeline} with any other HTTP-related setting. */
private void validateNoSettingsConflictWithCustomPipeline() {
    if (credential != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "'credential' is not supported when custom 'httpPipeline' is specified"));
    }
    if (httpClient != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "'httpClient' is not supported when custom 'httpPipeline' is specified"));
    }
    if (httpLogOptions != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
    }
    if (!httpPipelinePolicies.isEmpty()) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
    }
    if (clientOptions != null) {
        throw LOGGER.logExceptionAsError(new IllegalStateException(
            "'clientOptions' is not supported when custom 'httpPipeline' is specified"));
    }
}
/**
 * Creates the statsbeat module used to report SDK-internal health metrics.
 *
 * <p>NOTE(review): {@code configProperties} is currently unused here; it appears to be kept
 * so the signature mirrors the other init/start helpers.
 */
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
/**
 * Starts the statsbeat module against the already-built telemetry exporter.
 *
 * <p>Reporting intervals come from configuration, defaulting to 15 minutes (short) and
 * 1 day (long). NOTE(review): the two {@code false} flags are opaque here — confirm their
 * meaning against {@code StatsbeatModule.start}.
 */
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(httpPipeline, statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} |
let's discuss, it may be worth pulling over our fake ingestion endpoint from https://github.com/microsoft/ApplicationInsights-Java/tree/main/smoke-tests/framework so we can have real end-to-end tests | private static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
} | TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule); | private static TelemetryItemExporter createTelemetryItemExporter(HttpPipeline httpPipeline, StatsbeatModule statsbeatModule) {
TelemetryPipeline telemetryPipeline = new TelemetryPipeline(httpPipeline, statsbeatModule);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
TelemetryItemExporter telemetryItemExporter;
if (tempDir != null) {
telemetryItemExporter =
new TelemetryItemExporter(
telemetryPipeline,
new LocalStorageTelemetryPipelineListener(
50,
TempDirs.getSubDir(tempDir, "telemetry"),
telemetryPipeline,
LocalStorageStats.noop(),
false));
} else {
telemetryItemExporter = new TelemetryItemExporter(
telemetryPipeline,
TelemetryPipelineListener.noop());
}
return telemetryItemExporter;
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(httpPipeline, statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
    /**
     * Resolves the HTTP pipeline used to talk to the ingestion endpoint.
     * A caller-supplied pipeline is returned verbatim after verifying that no
     * conflicting HTTP-level option was also configured; otherwise a pipeline is
     * assembled from the individually configured pieces.
     *
     * @return the HTTP pipeline to use for all ingestion requests
     * @throws IllegalStateException if a custom pipeline is combined with any
     *     other HTTP-level option
     */
    private HttpPipeline createHttpPipeline() {
        if (httpPipeline != null) {
            // A custom pipeline is all-or-nothing: any other HTTP option would be
            // silently ignored, so fail loudly instead.
            if (credential != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'credential' is not supported when custom 'httpPipeline' is specified"));
            }
            if (httpClient != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'httpClient' is not supported when custom 'httpPipeline' is specified"));
            }
            if (httpLogOptions != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
            }
            if (!httpPipelinePolicies.isEmpty()) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
            }
            if (clientOptions != null) {
                throw LOGGER.logExceptionAsError(new IllegalStateException(
                    "'clientOptions' is not supported when custom 'httpPipeline' is specified"));
            }
            return httpPipeline;
        }
        // No custom pipeline: assemble one from the configured pieces.
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        // Client name/version come from the generated properties file; fall back
        // to placeholders when the file is missing.
        String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
        String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
        String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
        policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
        policies.add(new CookiePolicy());
        if (credential != null) {
            // AAD bearer-token authentication against the ingestion endpoint.
            policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
        }
        // User-supplied policies come after the required ones; logging is added last.
        policies.addAll(httpPipelinePolicies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new com.azure.core.http.HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .tracer(new NoopTracer())
            .build();
    }
    // Creates the statsbeat (SDK self-diagnostics) module.
    // 'configProperties' is currently unused; presumably kept for signature
    // symmetry with the other init/start helpers — confirm before removing.
    private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
        return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
    }
    // Starts statsbeat reporting through the already-built telemetry item exporter;
    // internalBuildAndFreeze() must have assigned builtTelemetryItemExporter first.
    // NOTE(review): the meaning of the two positional 'false' flags is not visible
    // here — confirm against StatsbeatModule.start before changing either.
    private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
        statsbeatModule.start(
            builtTelemetryItemExporter,
            this::getStatsbeatConnectionString,
            getConnectionString(configProperties)::getInstrumentationKey,
            false,
            // Short/long reporting intervals, overridable via configuration;
            // defaults are 15 minutes and 1 day (in seconds).
            configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
            configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
            false,
            initStatsbeatFeatures());
    }
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporter.
*/
public void build(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
builtTelemetryItemExporter = createTelemetryItemExporter(httpPipeline, statsbeatModule);
startStatsbeatModule(statsbeatModule, configProperties);
frozen = true;
}
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties) {
statsbeatModule.start(
builtTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
} |
```suggestion // TODO (heya) change LocalStorageStats.noop() to statsbeatModule.getNonessentialStatsbeat() when we decide to collect non-essential Statsbeat by default. ``` | private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
builtTelemetryItemExporter = AzureMonitorHelper.createTelemetryItemExporter(httpPipeline, statsbeatModule, tempDir, LocalStorageStats.noop());
startStatsbeatModule(statsbeatModule, configProperties, tempDir);
frozen = true;
}
} | private void internalBuildAndFreeze(ConfigProperties configProperties) {
if (!frozen) {
HttpPipeline httpPipeline = createHttpPipeline();
StatsbeatModule statsbeatModule = initStatsbeatModule(configProperties);
File tempDir =
TempDirs.getApplicationInsightsTempDir(
LOGGER,
"Telemetry will not be stored to disk and retried on sporadic network failures");
builtTelemetryItemExporter = AzureMonitorHelper.createTelemetryItemExporter(httpPipeline, statsbeatModule, tempDir, LocalStorageStats.noop());
startStatsbeatModule(statsbeatModule, configProperties, tempDir);
frozen = true;
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https:
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporters.
*/
public void install(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
HeartbeatExporter.start(
MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING));
return Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
}
// Builds the HTTP pipeline used to send telemetry to the ingestion endpoint.
private HttpPipeline createHttpPipeline() {
// A caller-supplied pipeline is used verbatim. Every other HTTP-related option
// would be silently ignored in that case, so each conflicting setting fails fast.
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
// Policy order matters: user-agent and cookie handling first, bearer-token auth
// (only when a credential was configured), then user-supplied policies, and
// logging last so it observes the request produced by all preceding policies.
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
// Creates the statsbeat module. The configProperties parameter is unused here;
// configuration is applied later in startStatsbeatModule.
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
// Wires up the statsbeat HTTP pipeline/exporter and starts periodic statsbeat
// (SDK self-diagnostics) reporting.
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties, File tempDir) {
HttpPipeline statsbeatHttpPipeline = createStatsbeatHttpPipeline();
TelemetryItemExporter statsbeatTelemetryItemExporter = AzureMonitorHelper.createStatsbeatTelemetryItemExporter(statsbeatHttpPipeline, statsbeatModule, tempDir);
statsbeatModule.start(
statsbeatTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
// NOTE(review): these interval lookups use keys equal to the constant names
// themselves (see STATSBEAT_*_PROPERTY_NAME) — confirm they are the intended
// configuration property names.
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
// Builds the HTTP pipeline used for statsbeat uploads. A caller-provided pipeline is
// reused as-is; otherwise a pipeline equivalent to the telemetry one is assembled,
// minus the bearer-token policy (statsbeat requests carry no credential).
private HttpPipeline createStatsbeatHttpPipeline() {
if (httpPipeline != null) {
return httpPipeline;
}
String sdkName = PROPERTIES.getOrDefault("name", "UnknownName");
String sdkVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
// Order matters: logging is appended last so it sees the request produced by
// every other policy.
List<HttpPipelinePolicy> pipelinePolicies = new ArrayList<>();
pipelinePolicies.add(
new UserAgentPolicy(
CoreUtils.getApplicationId(clientOptions, httpLogOptions),
sdkName,
sdkVersion,
Configuration.getGlobalConfiguration()));
pipelinePolicies.add(new CookiePolicy());
pipelinePolicies.addAll(httpPipelinePolicies);
pipelinePolicies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(pipelinePolicies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
} | class AzureMonitorExporterBuilder {
private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);
private static final String APPLICATIONINSIGHTS_CONNECTION_STRING =
"APPLICATIONINSIGHTS_CONNECTION_STRING";
private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE =
"https://monitor.azure.com//.default";
private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME";
private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME";
private static final Map<String, String> PROPERTIES =
CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");
private ConnectionString connectionString;
private TokenCredential credential;
@SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
private AzureMonitorExporterServiceVersion serviceVersion;
private HttpPipeline httpPipeline;
private HttpClient httpClient;
private HttpLogOptions httpLogOptions;
private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
private ClientOptions clientOptions;
private boolean frozen;
private TelemetryItemExporter builtTelemetryItemExporter;
/**
* Creates an instance of {@link AzureMonitorExporterBuilder}.
*/
public AzureMonitorExporterBuilder() {
}
/**
* Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other
* settings are ignored.
*
* @param httpPipeline The HTTP pipeline to use for sending service requests and receiving
* responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipeline cannot be changed after any of the build methods have been called"));
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the HTTP client to use for sending and receiving requests to and from the service.
*
* @param httpClient The HTTP client to use for requests.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpClient cannot be changed after any of the build methods have been called"));
}
this.httpClient = httpClient;
return this;
}
/**
* Sets the logging configuration for HTTP requests and responses.
*
* <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel
*
* @param httpLogOptions The logging configuration to use when sending and receiving HTTP
* requests/responses.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpLogOptions cannot be changed after any of the build methods have been called"));
}
this.httpLogOptions = httpLogOptions;
return this;
}
/**
* Adds a policy to the set of existing policies that are executed after required policies.
*
* @param httpPipelinePolicy a policy to be added to the http pipeline.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If {@code policy} is {@code null}.
*/
public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"httpPipelinePolicies cannot be added after any of the build methods have been called"));
}
httpPipelinePolicies.add(
Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
return this;
}
/**
* Sets the client options such as application ID and custom headers to set on a request.
*
* @param clientOptions The client options.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"clientOptions cannot be changed after any of the build methods have been called"));
}
this.clientOptions = clientOptions;
return this;
}
/**
* Sets the connection string to use for exporting telemetry events to Azure Monitor.
*
* @param connectionString The connection string for the Azure Monitor resource.
* @return The updated {@link AzureMonitorExporterBuilder} object.
* @throws NullPointerException If the connection string is {@code null}.
* @throws IllegalArgumentException If the connection string is invalid.
*/
public AzureMonitorExporterBuilder connectionString(String connectionString) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"connectionString cannot be changed after any of the build methods have been called"));
}
this.connectionString = ConnectionString.parse(connectionString);
return this;
}
/**
* Sets the Azure Monitor service version.
*
* @param serviceVersion The Azure Monitor service version.
* @return The update {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder serviceVersion(
AzureMonitorExporterServiceVersion serviceVersion) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"serviceVersion cannot be changed after any of the build methods have been called"));
}
this.serviceVersion = serviceVersion;
return this;
}
/**
* Sets the token credential required for authentication with the ingestion endpoint service.
*
* @param credential The Azure Identity TokenCredential.
* @return The updated {@link AzureMonitorExporterBuilder} object.
*/
public AzureMonitorExporterBuilder credential(TokenCredential credential) {
if (frozen) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"credential cannot be changed after any of the build methods have been called"));
}
this.credential = credential;
return this;
}
/**
* Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link SpanExporter}.
*
* @return An instance of {@link AzureMonitorTraceExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public SpanExporter buildTraceExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildTraceExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link MetricExporter}.
*
* <p>When a new {@link MetricExporter} is created, it will automatically start {@link
* HeartbeatExporter}.
*
* @return An instance of {@link AzureMonitorMetricExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public MetricExporter buildMetricExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildMetricExporter(defaultConfig);
}
/**
* Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This
* exporter is an implementation of OpenTelemetry {@link LogRecordExporter}.
*
* @return An instance of {@link AzureMonitorLogRecordExporter}.
* @throws NullPointerException if the connection string is not set on this builder or if the
* environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set.
*/
public LogRecordExporter buildLogRecordExporter() {
ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap());
internalBuildAndFreeze(defaultConfig);
return buildLogRecordExporter(defaultConfig);
}
/**
* Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder.
*
* @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporters.
*/
public void install(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) {
// Route all three signals to this exporter and mark that the provider-installed
// marker exporters should be replaced by real ones in the customizers below.
sdkBuilder.addPropertiesSupplier(() -> {
Map<String, String> props = new HashMap<>();
props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME);
props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true");
return props;
});
// Each customizer swaps the marker exporter for a fully built Azure Monitor
// exporter. NOTE(review): internalBuildAndFreeze is invoked once per signal here —
// assumed safe to call repeatedly on the same builder; confirm.
sdkBuilder.addSpanExporterCustomizer(
(spanExporter, configProperties) -> {
if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) {
internalBuildAndFreeze(configProperties);
spanExporter = buildTraceExporter(configProperties);
}
return spanExporter;
});
sdkBuilder.addMetricExporterCustomizer(
(metricExporter, configProperties) -> {
if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) {
internalBuildAndFreeze(configProperties);
metricExporter = buildMetricExporter(configProperties);
}
return metricExporter;
});
sdkBuilder.addLogRecordExporterCustomizer(
(logRecordExporter, configProperties) -> {
if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) {
internalBuildAndFreeze(configProperties);
logRecordExporter = buildLogRecordExporter(configProperties);
}
return logRecordExporter;
});
// Drop the OpenTelemetry SDK's internal self-monitoring instruments
// (io.opentelemetry.sdk.trace / io.opentelemetry.sdk.logs) so they are not
// exported as user metrics.
sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) ->
sdkMeterProviderBuilder.registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.trace")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
).registerView(
InstrumentSelector.builder()
.setMeterName("io.opentelemetry.sdk.logs")
.build(),
View.builder()
.setAggregation(Aggregation.drop())
.build()
));
}
private SpanExporter buildTraceExporter(ConfigProperties configProperties) {
return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter);
}
// Creates the OpenTelemetry MetricExporter and starts the heartbeat telemetry loop.
// The defaults populator is built once and shared between the heartbeat and the
// metric mapper (previously it was constructed twice; the populator is a stateless
// closure, so sharing it does not change behavior).
private MetricExporter buildMetricExporter(ConfigProperties configProperties) {
BiConsumer<AbstractTelemetryBuilder, Resource> defaultsPopulator = createDefaultsPopulator(configProperties);
// Heartbeat interval is fixed at 15 minutes; builtTelemetryItemExporter is set by
// internalBuildAndFreeze in the public build methods.
HeartbeatExporter.start(
MINUTES.toSeconds(15), defaultsPopulator, builtTelemetryItemExporter::send);
return new AzureMonitorMetricExporter(
new MetricDataMapper(defaultsPopulator, true), builtTelemetryItemExporter);
}
private Set<Feature> initStatsbeatFeatures() {
return Collections.emptySet();
}
private StatsbeatConnectionString getStatsbeatConnectionString() {
return StatsbeatConnectionString.create(connectionString, null, null);
}
private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) {
return new AzureMonitorLogRecordExporter(
new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter);
}
private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) {
return new SpanDataMapper(
true,
createDefaultsPopulator(configProperties),
(event, instrumentationName) -> false,
(span, event) -> false);
}
private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) {
ConnectionString connectionString = getConnectionString(configProperties);
return (builder, resource) -> {
builder.setConnectionString(connectionString);
builder.setResource(resource);
builder.addTag(
ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties);
};
}
// Resolves the connection string: an explicitly configured value wins; otherwise the
// APPLICATIONINSIGHTS_CONNECTION_STRING configuration property is used.
// Changes from the original: the local no longer shadows the 'connectionString'
// field, and the null check now runs before parsing, so a missing property fails
// with the intended message instead of an NPE from inside ConnectionString.parse.
private ConnectionString getConnectionString(ConfigProperties configProperties) {
if (connectionString != null) {
return connectionString;
}
String connectionStringValue =
Objects.requireNonNull(
configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING),
"'connectionString' cannot be null");
return ConnectionString.parse(connectionStringValue);
}
private HttpPipeline createHttpPipeline() {
if (httpPipeline != null) {
if (credential != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'credential' is not supported when custom 'httpPipeline' is specified"));
}
if (httpClient != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpClient' is not supported when custom 'httpPipeline' is specified"));
}
if (httpLogOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpLogOptions' is not supported when custom 'httpPipeline' is specified"));
}
if (!httpPipelinePolicies.isEmpty()) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified"));
}
if (clientOptions != null) {
throw LOGGER.logExceptionAsError(new IllegalStateException(
"'clientOptions' is not supported when custom 'httpPipeline' is specified"));
}
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
if (credential != null) {
policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE));
}
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) {
return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration);
}
private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties, File tempDir) {
HttpPipeline statsbeatHttpPipeline = createStatsbeatHttpPipeline();
TelemetryItemExporter statsbeatTelemetryItemExporter = AzureMonitorHelper.createStatsbeatTelemetryItemExporter(statsbeatHttpPipeline, statsbeatModule, tempDir);
statsbeatModule.start(
statsbeatTelemetryItemExporter,
this::getStatsbeatConnectionString,
getConnectionString(configProperties)::getInstrumentationKey,
false,
configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)),
configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)),
false,
initStatsbeatFeatures());
}
private HttpPipeline createStatsbeatHttpPipeline() {
if (httpPipeline != null) {
return httpPipeline;
}
List<HttpPipelinePolicy> policies = new ArrayList<>();
String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration()));
policies.add(new CookiePolicy());
policies.addAll(httpPipelinePolicies);
policies.add(new HttpLoggingPolicy(httpLogOptions));
return new com.azure.core.http.HttpPipelineBuilder()
.policies(policies.toArray(new HttpPipelinePolicy[0]))
.httpClient(httpClient)
.tracer(new NoopTracer())
.build();
}
} | |
If "content" is optional, you need some null-pointer protection, e.g. `this.content = content == null ? null : content.toBytes();` | public ContentSafetyImageData setContent(BinaryData content) {
this.content = content.toBytes();
return this;
} | } | public ContentSafetyImageData setContent(BinaryData content) {
this.content = content == null ? null : content.toBytes();
return this;
} | class ContentSafetyImageData {
/*
* The Base64 encoding of the image.
*/
@Generated
@JsonProperty(value = "content")
private byte[] content;
/*
* The blob url of the image.
*/
@Generated
@JsonProperty(value = "blobUrl")
private String blobUri;
/** Creates an instance of ContentSafetyImageData class. */
@Generated
public ContentSafetyImageData() {}
/**
 * Get the content property: The Base64 encoding of the image.
 *
 * @return the content value, or null if no content has been set.
 */
public BinaryData getContent() {
// content is optional: guard so an unset value returns null instead of letting
// BinaryData.fromBytes throw an NPE (matches the null handling used elsewhere
// for this field).
return this.content == null ? null : BinaryData.fromBytes(this.content);
}
/**
* Set the content property: The Base64 encoding of the image.
*
* @param content the content value to set.
* @return the ImageData object itself.
*/
/**
* Get the blobUri property: The blob url of the image.
*
* @return the blobUri value.
*/
@Generated
public String getBlobUri() {
return this.blobUri;
}
/**
* Set the blobUri property: The blob url of the image.
*
* @param blobUri the blobUri value to set.
* @return the ContentSafetyImageData object itself.
*/
@Generated
public ContentSafetyImageData setBlobUri(String blobUri) {
this.blobUri = blobUri;
return this;
}
} | class ContentSafetyImageData {
/*
* The Base64 encoding of the image.
*/
@Generated
@JsonProperty(value = "content")
private byte[] content;
/*
* The blob url of the image.
*/
@Generated
@JsonProperty(value = "blobUrl")
private String blobUrl;
/**
* Creates an instance of ContentSafetyImageData class.
*/
@Generated
public ContentSafetyImageData() {
}
/**
* Get the content property: The Base64 encoding of the image.
*
* @return the content value.
*/
public BinaryData getContent() {
return this.content == null ? null : BinaryData.fromBytes(this.content);
}
/**
* Set the content property: The Base64 encoding of the image.
*
* @param content the content value to set.
* @return the ContentSafetyImageData object itself.
*/
/**
* Get the blobUrl property: The blob url of the image.
*
* @return the blobUrl value.
*/
@Generated
public String getBlobUrl() {
return this.blobUrl;
}
/**
* Set the blobUrl property: The blob url of the image.
*
* @param blobUrl the blobUrl value to set.
* @return the ContentSafetyImageData object itself.
*/
@Generated
public ContentSafetyImageData setBlobUrl(String blobUrl) {
this.blobUrl = blobUrl;
return this;
}
} |
Oh, I see, you have already adjusted the customization to account for the types used in the union. | public ChatCompletionsOptions setFunctionCall(FunctionCallConfig functionCallConfig) {
this.functionCallConfig = functionCallConfig;
if (FunctionCallPreset.values().stream()
.anyMatch(preset -> preset.toString().equals(functionCallConfig.getName()))) {
this.functionCall = BinaryData.fromObject(FunctionCallPreset.fromString(this.functionCallConfig.getName()));
} else {
this.functionCall = BinaryData.fromObject(new FunctionName(this.functionCallConfig.getName()));
}
return this;
} | this.functionCall = BinaryData.fromObject(new FunctionName(this.functionCallConfig.getName())); | public ChatCompletionsOptions setFunctionCall(FunctionCallConfig functionCallConfig) {
this.functionCallConfig = functionCallConfig;
if (FunctionCallPreset.values().stream()
.anyMatch(preset -> preset.toString().equals(functionCallConfig.getName()))) {
this.functionCall = BinaryData.fromObject(FunctionCallPreset.fromString(this.functionCallConfig.getName()));
} else {
this.functionCall = BinaryData.fromObject(new FunctionName(this.functionCallConfig.getName()));
}
return this;
} | class ChatCompletionsOptions {
/*
* The collection of context messages associated with this chat completions request.
* Typical usage begins with a chat message for the System role that provides instructions for
* the behavior of the assistant, followed by alternating messages between the User and
* Assistant roles.
*/
@Generated
@JsonProperty(value = "messages")
private List<ChatMessage> messages;
/*
* The maximum number of tokens to generate.
*/
@Generated
@JsonProperty(value = "max_tokens")
private Integer maxTokens;
/*
* The sampling temperature to use that controls the apparent creativity of generated completions.
* Higher values will make output more random while lower values will make results more focused
* and deterministic.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*/
@Generated
@JsonProperty(value = "temperature")
private Double temperature;
/*
* An alternative to sampling with temperature called nucleus sampling. This value causes the
* model to consider the results of tokens with the provided probability mass. As an example, a
* value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
* considered.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*/
@Generated
@JsonProperty(value = "top_p")
private Double topP;
/*
* A map between GPT token IDs and bias scores that influences the probability of specific tokens
* appearing in a completions response. Token IDs are computed via external tokenizer tools, while
* bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to
* a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias
* score varies by model.
*/
@Generated
@JsonProperty(value = "logit_bias")
private Map<String, Integer> logitBias;
/*
* An identifier for the caller or end user of the operation. This may be used for tracking
* or rate-limiting purposes.
*/
@Generated
@JsonProperty(value = "user")
private String user;
/*
* The number of chat completions choices that should be generated for a chat completions
* response.
* Because this setting can generate many completions, it may quickly consume your token quota.
* Use carefully and ensure reasonable settings for max_tokens and stop.
*/
@Generated
@JsonProperty(value = "n")
private Integer n;
/*
* A collection of textual sequences that will end completions generation.
*/
@Generated
@JsonProperty(value = "stop")
private List<String> stop;
/*
* A value that influences the probability of generated tokens appearing based on their existing
* presence in generated text.
* Positive values will make tokens less likely to appear when they already exist and increase the
* model's likelihood to output new topics.
*/
@Generated
@JsonProperty(value = "presence_penalty")
private Double presencePenalty;
/*
* A value that influences the probability of generated tokens appearing based on their cumulative
* frequency in generated text.
* Positive values will make tokens less likely to appear as their frequency increases and
* decrease the likelihood of the model repeating the same statements verbatim.
*/
@Generated
@JsonProperty(value = "frequency_penalty")
private Double frequencyPenalty;
/*
* A value indicating whether chat completions should be streamed for this request.
*/
@Generated
@JsonProperty(value = "stream")
private Boolean stream;
/*
* The model name to provide as part of this completions request.
* Not applicable to Azure OpenAI, where deployment information should be included in the Azure
* resource URI that's connected to.
*/
@Generated
@JsonProperty(value = "model")
private String model;
/**
* Creates an instance of ChatCompletionsOptions class.
*
* @param messages the messages value to set.
*/
@Generated
@JsonCreator
public ChatCompletionsOptions(@JsonProperty(value = "messages") List<ChatMessage> messages) {
this.messages = messages;
}
/**
* Get the messages property: The collection of context messages associated with this chat completions request.
* Typical usage begins with a chat message for the System role that provides instructions for
* the behavior of the assistant, followed by alternating messages between the User and
* Assistant roles.
*
* @return the messages value.
*/
@Generated
public List<ChatMessage> getMessages() {
return this.messages;
}
/**
* Get the maxTokens property: The maximum number of tokens to generate.
*
* @return the maxTokens value.
*/
@Generated
public Integer getMaxTokens() {
return this.maxTokens;
}
/**
* Set the maxTokens property: The maximum number of tokens to generate.
*
* @param maxTokens the maxTokens value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setMaxTokens(Integer maxTokens) {
this.maxTokens = maxTokens;
return this;
}
/**
* Get the temperature property: The sampling temperature to use that controls the apparent creativity of generated
* completions.
* Higher values will make output more random while lower values will make results more focused
* and deterministic.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @return the temperature value.
*/
@Generated
public Double getTemperature() {
return this.temperature;
}
/**
* Set the temperature property: The sampling temperature to use that controls the apparent creativity of generated
* completions.
* Higher values will make output more random while lower values will make results more focused
* and deterministic.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @param temperature the temperature value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setTemperature(Double temperature) {
this.temperature = temperature;
return this;
}
/**
* Get the topP property: An alternative to sampling with temperature called nucleus sampling. This value causes
* the
* model to consider the results of tokens with the provided probability mass. As an example, a
* value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
* considered.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @return the topP value.
*/
@Generated
public Double getTopP() {
return this.topP;
}
/**
* Set the topP property: An alternative to sampling with temperature called nucleus sampling. This value causes
* the
* model to consider the results of tokens with the provided probability mass. As an example, a
* value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
* considered.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @param topP the topP value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setTopP(Double topP) {
this.topP = topP;
return this;
}
/**
* Get the logitBias property: A map between GPT token IDs and bias scores that influences the probability of
* specific tokens
* appearing in a completions response. Token IDs are computed via external tokenizer tools, while
* bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to
* a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias
* score varies by model.
*
* @return the logitBias value.
*/
@Generated
public Map<String, Integer> getLogitBias() {
return this.logitBias;
}
/**
* Set the logitBias property: A map between GPT token IDs and bias scores that influences the probability of
* specific tokens
* appearing in a completions response. Token IDs are computed via external tokenizer tools, while
* bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to
* a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias
* score varies by model.
*
* @param logitBias the logitBias value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setLogitBias(Map<String, Integer> logitBias) {
this.logitBias = logitBias;
return this;
}
/**
* Get the user property: An identifier for the caller or end user of the operation. This may be used for tracking
* or rate-limiting purposes.
*
* @return the user value.
*/
@Generated
public String getUser() {
return this.user;
}
/**
* Set the user property: An identifier for the caller or end user of the operation. This may be used for tracking
* or rate-limiting purposes.
*
* @param user the user value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setUser(String user) {
this.user = user;
return this;
}
/**
* Get the n property: The number of chat completions choices that should be generated for a chat completions
* response.
* Because this setting can generate many completions, it may quickly consume your token quota.
* Use carefully and ensure reasonable settings for max_tokens and stop.
*
* @return the n value.
*/
@Generated
public Integer getN() {
return this.n;
}
/**
* Set the n property: The number of chat completions choices that should be generated for a chat completions
* response.
* Because this setting can generate many completions, it may quickly consume your token quota.
* Use carefully and ensure reasonable settings for max_tokens and stop.
*
* @param n the n value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setN(Integer n) {
this.n = n;
return this;
}
/**
* Get the stop property: A collection of textual sequences that will end completions generation.
*
* @return the stop value.
*/
@Generated
public List<String> getStop() {
return this.stop;
}
/**
* Set the stop property: A collection of textual sequences that will end completions generation.
*
* @param stop the stop value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setStop(List<String> stop) {
this.stop = stop;
return this;
}
/**
* Get the presencePenalty property: A value that influences the probability of generated tokens appearing based on
* their existing
* presence in generated text.
* Positive values will make tokens less likely to appear when they already exist and increase the
* model's likelihood to output new topics.
*
* @return the presencePenalty value.
*/
@Generated
public Double getPresencePenalty() {
return this.presencePenalty;
}
/**
* Set the presencePenalty property: A value that influences the probability of generated tokens appearing based on
* their existing
* presence in generated text.
* Positive values will make tokens less likely to appear when they already exist and increase the
* model's likelihood to output new topics.
*
* @param presencePenalty the presencePenalty value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setPresencePenalty(Double presencePenalty) {
this.presencePenalty = presencePenalty;
return this;
}
/**
* Get the frequencyPenalty property: A value that influences the probability of generated tokens appearing based
* on their cumulative
* frequency in generated text.
* Positive values will make tokens less likely to appear as their frequency increases and
* decrease the likelihood of the model repeating the same statements verbatim.
*
* @return the frequencyPenalty value.
*/
@Generated
public Double getFrequencyPenalty() {
return this.frequencyPenalty;
}
/**
* Set the frequencyPenalty property: A value that influences the probability of generated tokens appearing based
* on their cumulative
* frequency in generated text.
* Positive values will make tokens less likely to appear as their frequency increases and
* decrease the likelihood of the model repeating the same statements verbatim.
*
* @param frequencyPenalty the frequencyPenalty value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setFrequencyPenalty(Double frequencyPenalty) {
this.frequencyPenalty = frequencyPenalty;
return this;
}
/**
* Get the stream property: A value indicating whether chat completions should be streamed for this request.
*
* @return the stream value.
*/
@Generated
public Boolean isStream() {
return this.stream;
}
/**
* Set the stream property: A value indicating whether chat completions should be streamed for this request.
*
* @param stream the stream value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setStream(Boolean stream) {
this.stream = stream;
return this;
}
/**
* Get the model property: The model name to provide as part of this completions request.
* Not applicable to Azure OpenAI, where deployment information should be included in the Azure
* resource URI that's connected to.
*
* @return the model value.
*/
@Generated
public String getModel() {
return this.model;
}
/**
* Set the model property: The model name to provide as part of this completions request.
* Not applicable to Azure OpenAI, where deployment information should be included in the Azure
* resource URI that's connected to.
*
* @param model the model value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setModel(String model) {
this.model = model;
return this;
}
/*
* A list of functions the model may generate JSON inputs for.
*/
@Generated
@JsonProperty(value = "functions")
private List<FunctionDefinition> functions;
/*
* Controls how the model responds to function calls. "none" means the model does not call a function,
* and responds to the end-user. "auto" means the model can pick between an end-user or calling a function.
* Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*/
@Generated
@JsonProperty(value = "function_call")
private BinaryData functionCall;
/*
* Field not used for serialization. This is a convenience helper field for the serialization of "function_call".
*/
@JsonIgnore
private FunctionCallConfig functionCallConfig;
/**
* Get the functions property: A list of functions the model may generate JSON inputs for.
*
* @return the functions value.
*/
@Generated
public List<FunctionDefinition> getFunctions() {
return this.functions;
}
/**
* Set the functions property: A list of functions the model may generate JSON inputs for.
*
* @param functions the functions value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setFunctions(List<FunctionDefinition> functions) {
this.functions = functions;
return this;
}
/**
* Get the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @return the functionCall value.
*/
BinaryData getFunctionCallInternal() {
return this.functionCall;
}
/**
* Set the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @param functionCall the functionCall value to set.
* @return the ChatCompletionsOptions object itself.
*/
ChatCompletionsOptions setFunctionCallInternal(BinaryData functionCall) {
this.functionCall = functionCall;
return this;
}
/**
* Get the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @return the functionCall value.
*/
public FunctionCallConfig getFunctionCall() {
return this.functionCallConfig;
}
/**
* Set the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @param functionCallConfig the functionCall value to set.
* @return the ChatCompletionsOptions object itself.
*/
/*
* The configuration entries for Azure OpenAI chat extensions that use them.
* This additional specification is only compatible with Azure OpenAI.
*/
@Generated
@JsonProperty(value = "dataSources")
private List<AzureChatExtensionConfiguration> dataSources;
/**
* Get the dataSources property: The configuration entries for Azure OpenAI chat extensions that use them.
* This additional specification is only compatible with Azure OpenAI.
*
* @return the dataSources value.
*/
@Generated
public List<AzureChatExtensionConfiguration> getDataSources() {
return this.dataSources;
}
/**
* Set the dataSources property: The configuration entries for Azure OpenAI chat extensions that use them.
* This additional specification is only compatible with Azure OpenAI.
*
* @param dataSources the dataSources value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setDataSources(List<AzureChatExtensionConfiguration> dataSources) {
this.dataSources = dataSources;
return this;
}
} | class ChatCompletionsOptions {
/*
* The collection of context messages associated with this chat completions request.
* Typical usage begins with a chat message for the System role that provides instructions for
* the behavior of the assistant, followed by alternating messages between the User and
* Assistant roles.
*/
@Generated
@JsonProperty(value = "messages")
private List<ChatMessage> messages;
/*
* The maximum number of tokens to generate.
*/
@Generated
@JsonProperty(value = "max_tokens")
private Integer maxTokens;
/*
* The sampling temperature to use that controls the apparent creativity of generated completions.
* Higher values will make output more random while lower values will make results more focused
* and deterministic.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*/
@Generated
@JsonProperty(value = "temperature")
private Double temperature;
/*
* An alternative to sampling with temperature called nucleus sampling. This value causes the
* model to consider the results of tokens with the provided probability mass. As an example, a
* value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
* considered.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*/
@Generated
@JsonProperty(value = "top_p")
private Double topP;
/*
* A map between GPT token IDs and bias scores that influences the probability of specific tokens
* appearing in a completions response. Token IDs are computed via external tokenizer tools, while
* bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to
* a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias
* score varies by model.
*/
@Generated
@JsonProperty(value = "logit_bias")
private Map<String, Integer> logitBias;
/*
* An identifier for the caller or end user of the operation. This may be used for tracking
* or rate-limiting purposes.
*/
@Generated
@JsonProperty(value = "user")
private String user;
/*
* The number of chat completions choices that should be generated for a chat completions
* response.
* Because this setting can generate many completions, it may quickly consume your token quota.
* Use carefully and ensure reasonable settings for max_tokens and stop.
*/
@Generated
@JsonProperty(value = "n")
private Integer n;
/*
* A collection of textual sequences that will end completions generation.
*/
@Generated
@JsonProperty(value = "stop")
private List<String> stop;
/*
* A value that influences the probability of generated tokens appearing based on their existing
* presence in generated text.
* Positive values will make tokens less likely to appear when they already exist and increase the
* model's likelihood to output new topics.
*/
@Generated
@JsonProperty(value = "presence_penalty")
private Double presencePenalty;
/*
* A value that influences the probability of generated tokens appearing based on their cumulative
* frequency in generated text.
* Positive values will make tokens less likely to appear as their frequency increases and
* decrease the likelihood of the model repeating the same statements verbatim.
*/
@Generated
@JsonProperty(value = "frequency_penalty")
private Double frequencyPenalty;
/*
* A value indicating whether chat completions should be streamed for this request.
*/
@Generated
@JsonProperty(value = "stream")
private Boolean stream;
/*
* The model name to provide as part of this completions request.
* Not applicable to Azure OpenAI, where deployment information should be included in the Azure
* resource URI that's connected to.
*/
@Generated
@JsonProperty(value = "model")
private String model;
/**
* Creates an instance of ChatCompletionsOptions class.
*
* @param messages the messages value to set.
*/
@Generated
@JsonCreator
public ChatCompletionsOptions(@JsonProperty(value = "messages") List<ChatMessage> messages) {
this.messages = messages;
}
/**
* Get the messages property: The collection of context messages associated with this chat completions request.
* Typical usage begins with a chat message for the System role that provides instructions for
* the behavior of the assistant, followed by alternating messages between the User and
* Assistant roles.
*
* @return the messages value.
*/
@Generated
public List<ChatMessage> getMessages() {
return this.messages;
}
/**
* Get the maxTokens property: The maximum number of tokens to generate.
*
* @return the maxTokens value.
*/
@Generated
public Integer getMaxTokens() {
return this.maxTokens;
}
/**
* Set the maxTokens property: The maximum number of tokens to generate.
*
* @param maxTokens the maxTokens value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setMaxTokens(Integer maxTokens) {
this.maxTokens = maxTokens;
return this;
}
/**
* Get the temperature property: The sampling temperature to use that controls the apparent creativity of generated
* completions.
* Higher values will make output more random while lower values will make results more focused
* and deterministic.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @return the temperature value.
*/
@Generated
public Double getTemperature() {
return this.temperature;
}
/**
* Set the temperature property: The sampling temperature to use that controls the apparent creativity of generated
* completions.
* Higher values will make output more random while lower values will make results more focused
* and deterministic.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @param temperature the temperature value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setTemperature(Double temperature) {
this.temperature = temperature;
return this;
}
/**
* Get the topP property: An alternative to sampling with temperature called nucleus sampling. This value causes
* the
* model to consider the results of tokens with the provided probability mass. As an example, a
* value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
* considered.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @return the topP value.
*/
@Generated
public Double getTopP() {
return this.topP;
}
/**
* Set the topP property: An alternative to sampling with temperature called nucleus sampling. This value causes
* the
* model to consider the results of tokens with the provided probability mass. As an example, a
* value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be
* considered.
* It is not recommended to modify temperature and top_p for the same completions request as the
* interaction of these two settings is difficult to predict.
*
* @param topP the topP value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setTopP(Double topP) {
this.topP = topP;
return this;
}
/**
* Get the logitBias property: A map between GPT token IDs and bias scores that influences the probability of
* specific tokens
* appearing in a completions response. Token IDs are computed via external tokenizer tools, while
* bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to
* a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias
* score varies by model.
*
* @return the logitBias value.
*/
@Generated
public Map<String, Integer> getLogitBias() {
return this.logitBias;
}
/**
* Set the logitBias property: A map between GPT token IDs and bias scores that influences the probability of
* specific tokens
* appearing in a completions response. Token IDs are computed via external tokenizer tools, while
* bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to
* a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias
* score varies by model.
*
* @param logitBias the logitBias value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setLogitBias(Map<String, Integer> logitBias) {
this.logitBias = logitBias;
return this;
}
/**
* Get the user property: An identifier for the caller or end user of the operation. This may be used for tracking
* or rate-limiting purposes.
*
* @return the user value.
*/
@Generated
public String getUser() {
return this.user;
}
/**
* Set the user property: An identifier for the caller or end user of the operation. This may be used for tracking
* or rate-limiting purposes.
*
* @param user the user value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setUser(String user) {
this.user = user;
return this;
}
/**
* Get the n property: The number of chat completions choices that should be generated for a chat completions
* response.
* Because this setting can generate many completions, it may quickly consume your token quota.
* Use carefully and ensure reasonable settings for max_tokens and stop.
*
* @return the n value.
*/
@Generated
public Integer getN() {
return this.n;
}
/**
* Set the n property: The number of chat completions choices that should be generated for a chat completions
* response.
* Because this setting can generate many completions, it may quickly consume your token quota.
* Use carefully and ensure reasonable settings for max_tokens and stop.
*
* @param n the n value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setN(Integer n) {
this.n = n;
return this;
}
/**
* Get the stop property: A collection of textual sequences that will end completions generation.
*
* @return the stop value.
*/
@Generated
public List<String> getStop() {
return this.stop;
}
/**
* Set the stop property: A collection of textual sequences that will end completions generation.
*
* @param stop the stop value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setStop(List<String> stop) {
this.stop = stop;
return this;
}
/**
* Get the presencePenalty property: A value that influences the probability of generated tokens appearing based on
* their existing
* presence in generated text.
* Positive values will make tokens less likely to appear when they already exist and increase the
* model's likelihood to output new topics.
*
* @return the presencePenalty value.
*/
@Generated
public Double getPresencePenalty() {
return this.presencePenalty;
}
/**
* Set the presencePenalty property: A value that influences the probability of generated tokens appearing based on
* their existing
* presence in generated text.
* Positive values will make tokens less likely to appear when they already exist and increase the
* model's likelihood to output new topics.
*
* @param presencePenalty the presencePenalty value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setPresencePenalty(Double presencePenalty) {
this.presencePenalty = presencePenalty;
return this;
}
/**
* Get the frequencyPenalty property: A value that influences the probability of generated tokens appearing based
* on their cumulative
* frequency in generated text.
* Positive values will make tokens less likely to appear as their frequency increases and
* decrease the likelihood of the model repeating the same statements verbatim.
*
* @return the frequencyPenalty value.
*/
@Generated
public Double getFrequencyPenalty() {
return this.frequencyPenalty;
}
/**
* Set the frequencyPenalty property: A value that influences the probability of generated tokens appearing based
* on their cumulative
* frequency in generated text.
* Positive values will make tokens less likely to appear as their frequency increases and
* decrease the likelihood of the model repeating the same statements verbatim.
*
* @param frequencyPenalty the frequencyPenalty value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setFrequencyPenalty(Double frequencyPenalty) {
this.frequencyPenalty = frequencyPenalty;
return this;
}
/**
* Get the stream property: A value indicating whether chat completions should be streamed for this request.
*
* @return the stream value.
*/
@Generated
public Boolean isStream() {
return this.stream;
}
/**
* Set the stream property: A value indicating whether chat completions should be streamed for this request.
*
* @param stream the stream value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setStream(Boolean stream) {
this.stream = stream;
return this;
}
/**
* Get the model property: The model name to provide as part of this completions request.
* Not applicable to Azure OpenAI, where deployment information should be included in the Azure
* resource URI that's connected to.
*
* @return the model value.
*/
@Generated
public String getModel() {
return this.model;
}
/**
* Set the model property: The model name to provide as part of this completions request.
* Not applicable to Azure OpenAI, where deployment information should be included in the Azure
* resource URI that's connected to.
*
* @param model the model value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setModel(String model) {
this.model = model;
return this;
}
/*
* A list of functions the model may generate JSON inputs for.
*/
@Generated
@JsonProperty(value = "functions")
private List<FunctionDefinition> functions;
/*
* Controls how the model responds to function calls. "none" means the model does not call a function,
* and responds to the end-user. "auto" means the model can pick between an end-user or calling a function.
* Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*/
@Generated
@JsonProperty(value = "function_call")
private BinaryData functionCall;
/*
* Field not used for serialization. This is a convenience helper field for the serialization of "function_call".
*/
@JsonIgnore
private FunctionCallConfig functionCallConfig;
/**
* Get the functions property: A list of functions the model may generate JSON inputs for.
*
* @return the functions value.
*/
@Generated
public List<FunctionDefinition> getFunctions() {
return this.functions;
}
/**
* Set the functions property: A list of functions the model may generate JSON inputs for.
*
* @param functions the functions value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setFunctions(List<FunctionDefinition> functions) {
this.functions = functions;
return this;
}
/**
* Get the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @return the functionCall value.
*/
BinaryData getFunctionCallInternal() {
return this.functionCall;
}
/**
* Set the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @param functionCall the functionCall value to set.
* @return the ChatCompletionsOptions object itself.
*/
ChatCompletionsOptions setFunctionCallInternal(BinaryData functionCall) {
this.functionCall = functionCall;
return this;
}
/**
* Get the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @return the functionCall value.
*/
public FunctionCallConfig getFunctionCall() {
return this.functionCallConfig;
}
/**
* Set the functionCall property: Controls how the model responds to function calls. "none" means the model does not
* call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a
* function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*
* @param functionCallConfig the functionCall value to set.
* @return the ChatCompletionsOptions object itself.
*/
/*
* The configuration entries for Azure OpenAI chat extensions that use them.
* This additional specification is only compatible with Azure OpenAI.
*/
@Generated
@JsonProperty(value = "dataSources")
private List<AzureChatExtensionConfiguration> dataSources;
/**
* Get the dataSources property: The configuration entries for Azure OpenAI chat extensions that use them.
* This additional specification is only compatible with Azure OpenAI.
*
* @return the dataSources value.
*/
@Generated
public List<AzureChatExtensionConfiguration> getDataSources() {
return this.dataSources;
}
/**
* Set the dataSources property: The configuration entries for Azure OpenAI chat extensions that use them.
* This additional specification is only compatible with Azure OpenAI.
*
* @param dataSources the dataSources value to set.
* @return the ChatCompletionsOptions object itself.
*/
@Generated
public ChatCompletionsOptions setDataSources(List<AzureChatExtensionConfiguration> dataSources) {
this.dataSources = dataSources;
return this;
}
} |
if (!isPlaybackMode()) { | public void canCRUDFunctionApp() throws Exception {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName1)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertEquals(Region.US_WEST, functionApp1.region());
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_WEST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
String accountKey = functionAppResource1.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withExistingAppServicePlan(plan1)
.withNewResourceGroup(rgName2)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp2);
Assertions.assertEquals(Region.US_WEST, functionApp2.region());
Assertions.assertFalse(functionApp2.alwaysOn());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withRegion(Region.US_WEST)
.withExistingResourceGroup(rgName2)
.withNewAppServicePlan(PricingTier.BASIC_B1)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp3);
Assertions.assertEquals(Region.US_WEST, functionApp3.region());
Assertions.assertTrue(functionApp3.alwaysOn());
FunctionAppResource functionAppResource3 = getStorageAccount(storageManager, functionApp3);
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_SHARE));
accountKey = functionAppResource3.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource3.storageAccount.getKeys().get(0).value(), functionAppResource3.accountKey);
}
FunctionApp functionApp = appServiceManager.functionApps().getByResourceGroup(rgName1, functionApp1.name());
Assertions.assertEquals(functionApp1.id(), functionApp.id());
functionApp = appServiceManager.functionApps().getById(functionApp2.id());
Assertions.assertEquals(functionApp2.name(), functionApp.name());
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName2);
Assertions.assertEquals(2, TestUtilities.getSize(functionApps));
functionApp2.update().withNewStorageAccount(storageAccountName1, StorageAccountSkuType.STANDARD_LRS).apply();
Assertions.assertEquals(storageAccountName1, functionApp2.storageAccount().name());
FunctionAppResource functionAppResource2 = getStorageAccount(storageManager, functionApp2);
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource2.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource2.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions.assertEquals(storageAccountName1, functionAppResource2.storageAccount.name());
accountKey = functionAppResource2.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource2.storageAccount.getKeys().get(0).value(), functionAppResource2.accountKey);
}
int numStorageAccountBefore =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
functionApp1.update().withAppSetting("newKey", "newValue").apply();
int numStorageAccountAfter =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
Assertions.assertEquals(numStorageAccountBefore, numStorageAccountAfter);
FunctionAppResource functionAppResource1Updated = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1Updated.appSettings.containsKey("newKey"));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1Updated.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_SHARE).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_SHARE).value());
Assertions
.assertEquals(
functionAppResource1.storageAccount.name(), functionAppResource1Updated.storageAccount.name());
functionApp3.update().withNewAppServicePlan(PricingTier.STANDARD_S2).apply();
Assertions.assertNotEquals(functionApp3.appServicePlanId(), functionApp1.appServicePlanId());
Assertions.assertTrue(functionApp3.alwaysOn());
} | if (!"REDACTED".equals(accountKey)) { | public void canCRUDFunctionApp() throws Exception {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName1)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertEquals(Region.US_WEST, functionApp1.region());
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_WEST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withExistingAppServicePlan(plan1)
.withNewResourceGroup(rgName2)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp2);
Assertions.assertEquals(Region.US_WEST, functionApp2.region());
Assertions.assertFalse(functionApp2.alwaysOn());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withRegion(Region.US_WEST)
.withExistingResourceGroup(rgName2)
.withNewAppServicePlan(PricingTier.BASIC_B1)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp3);
Assertions.assertEquals(Region.US_WEST, functionApp3.region());
Assertions.assertTrue(functionApp3.alwaysOn());
FunctionAppResource functionAppResource3 = getStorageAccount(storageManager, functionApp3);
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_SHARE));
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource3.storageAccount.getKeys().get(0).value(), functionAppResource3.accountKey);
}
FunctionApp functionApp = appServiceManager.functionApps().getByResourceGroup(rgName1, functionApp1.name());
Assertions.assertEquals(functionApp1.id(), functionApp.id());
functionApp = appServiceManager.functionApps().getById(functionApp2.id());
Assertions.assertEquals(functionApp2.name(), functionApp.name());
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName2);
Assertions.assertEquals(2, TestUtilities.getSize(functionApps));
functionApp2.update().withNewStorageAccount(storageAccountName1, StorageAccountSkuType.STANDARD_LRS).apply();
Assertions.assertEquals(storageAccountName1, functionApp2.storageAccount().name());
FunctionAppResource functionAppResource2 = getStorageAccount(storageManager, functionApp2);
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource2.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource2.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions.assertEquals(storageAccountName1, functionAppResource2.storageAccount.name());
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource2.storageAccount.getKeys().get(0).value(), functionAppResource2.accountKey);
}
int numStorageAccountBefore =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
functionApp1.update().withAppSetting("newKey", "newValue").apply();
int numStorageAccountAfter =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
Assertions.assertEquals(numStorageAccountBefore, numStorageAccountAfter);
FunctionAppResource functionAppResource1Updated = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1Updated.appSettings.containsKey("newKey"));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1Updated.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_SHARE).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_SHARE).value());
Assertions
.assertEquals(
functionAppResource1.storageAccount.name(), functionAppResource1Updated.storageAccount.name());
functionApp3.update().withNewAppServicePlan(PricingTier.STANDARD_S2).apply();
Assertions.assertNotEquals(functionApp3.appServicePlanId(), functionApp1.appServicePlanId());
Assertions.assertTrue(functionApp3.alwaysOn());
} | class FunctionAppsTests extends AppServiceTest {
private String rgName1 = "";
private String rgName2 = "";
private String webappName1 = "";
private String webappName2 = "";
private String webappName3 = "";
private String appServicePlanName1 = "";
private String appServicePlanName2 = "";
private String storageAccountName1 = "";
protected StorageManager storageManager;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
webappName1 = generateRandomResourceName("java-func-", 20);
webappName2 = generateRandomResourceName("java-func-", 20);
webappName3 = generateRandomResourceName("java-func-", 20);
appServicePlanName1 = generateRandomResourceName("java-asp-", 20);
appServicePlanName2 = generateRandomResourceName("java-asp-", 20);
storageAccountName1 = generateRandomResourceName("javastore", 20);
rgName1 = generateRandomResourceName("javacsmrg", 20);
rgName2 = generateRandomResourceName("javacsmrg", 20);
storageManager = buildManager(StorageManager.class, httpPipeline, profile);
super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
if (rgName1 != null) {
resourceManager.resourceGroups().beginDeleteByName(rgName1);
}
if (rgName2 != null) {
try {
resourceManager.resourceGroups().beginDeleteByName(rgName2);
} catch (ManagementException e) {
}
}
}
@Test
private static final String FUNCTION_APP_PACKAGE_URL =
"https:
@Test
public void canCRUDLinuxFunctionApp() throws Exception {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_EAST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
Assertions.assertTrue(plan1.innerModel().reserved());
Assertions
.assertTrue(
Arrays
.asList(functionApp1.innerModel().kind().split(Pattern.quote(",")))
.containsAll(Arrays.asList("linux", "functionapp")));
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
String accountKey = functionAppResource1.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName1)
.withNewLinuxAppServicePlan(PricingTier.STANDARD_S1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp2);
assertLinuxJava(functionApp2, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp2.alwaysOn());
AppServicePlan plan2 = appServiceManager.appServicePlans().getById(functionApp2.appServicePlanId());
Assertions.assertNotNull(plan2);
Assertions.assertEquals(PricingTier.STANDARD_S1, plan2.pricingTier());
Assertions.assertTrue(plan2.innerModel().reserved());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withExistingLinuxAppServicePlan(plan2)
.withExistingResourceGroup(rgName1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp3);
assertLinuxJava(functionApp3, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp3.alwaysOn());
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(3, TestUtilities.getSize(functionApps));
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp2.resourceGroupName(), functionApp2.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp3.resourceGroupName(), functionApp3.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
public void canCRUDLinuxFunctionAppPremium() {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"), plan1.pricingTier());
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
@Disabled("Need container registry")
public void canCRUDLinuxFunctionAppPremiumDocker() {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withPrivateRegistryImage(
"weidxuregistry.azurecr.io/az-func-java:v1", "https:
.withCredentials("weidxuregistry", "PASSWORD")
.withRuntime("java")
.withRuntimeVersion("~3")
.create();
Assertions.assertFalse(functionApp1.alwaysOn());
if (!isPlaybackMode()) {
functionApp1.zipDeploy(new File(FunctionAppsTests.class.getResource("/java-functions.zip").getPath()));
}
}
@Test
public void canCRUDLinuxFunctionAppJava11() throws Exception {
rgName2 = null;
String runtimeVersion = "~4";
FunctionApp functionApp1 = appServiceManager.functionApps().define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_11)
.withRuntimeVersion(runtimeVersion)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_11, runtimeVersion);
assertRunning(functionApp1);
}
@Test
public void canCRUDLinuxFunctionAppJava17() throws Exception {
rgName2 = null;
FunctionApp functionApp1 = appServiceManager.functionApps().define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_17)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_17);
assertRunning(functionApp1);
}
@Test
public void canCreateAndUpdateFunctionAppWithContainerSize() {
rgName2 = null;
webappName1 = generateRandomResourceName("java-function-", 20);
String functionDeploymentSlotName = generateRandomResourceName("fds", 15);
FunctionApp functionApp1 = appServiceManager.functionApps()
.define(webappName1)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName1)
.withContainerSize(512)
.create();
FunctionDeploymentSlot functionDeploymentSlot = functionApp1.deploymentSlots()
.define(functionDeploymentSlotName)
.withConfigurationFromParent()
.withContainerSize(256)
.create();
Assertions.assertEquals(512, functionApp1.containerSize());
Assertions.assertEquals(256, functionDeploymentSlot.containerSize());
functionApp1.update()
.withContainerSize(320)
.apply();
functionApp1.refresh();
Assertions.assertEquals(320, functionApp1.containerSize());
Assertions.assertEquals(256, functionDeploymentSlot.containerSize());
functionDeploymentSlot.update()
.withContainerSize(128)
.apply();
functionDeploymentSlot.refresh();
Assertions.assertEquals(128, functionDeploymentSlot.containerSize());
}
private void assertRunning(FunctionApp functionApp) {
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(1));
String name = "linux_function_app";
Response<String> response = curl("https:
+ "/api/HttpTrigger-Java?name=" + name);
Assertions.assertEquals(200, response.getStatusCode());
String body = response.getValue();
Assertions.assertNotNull(body);
Assertions.assertTrue(body.contains("Hello, " + name));
}
}
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack) {
return assertLinuxJava(functionApp, stack, null);
}
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack,
String runtimeVersion) {
Assertions.assertEquals(stack.getLinuxFxVersion(), functionApp.linuxFxVersion());
Assertions
.assertTrue(
Arrays
.asList(functionApp.innerModel().kind().split(Pattern.quote(",")))
.containsAll(Arrays.asList("linux", "functionapp")));
Assertions.assertTrue(functionApp.innerModel().reserved());
Map<String, AppSetting> appSettings = functionApp.getAppSettings();
Assertions.assertNotNull(appSettings);
Assertions.assertNotNull(appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE));
Assertions.assertEquals(
stack.runtime(),
appSettings.get(KEY_FUNCTIONS_WORKER_RUNTIME).value());
Assertions.assertEquals(
runtimeVersion == null ? stack.version() : runtimeVersion,
appSettings.get(KEY_FUNCTIONS_EXTENSION_VERSION).value());
return appSettings;
}
private static final String KEY_AZURE_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String KEY_CONTENT_AZURE_FILE_CONNECTION_STRING = "WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String KEY_CONTENT_SHARE = "WEBSITE_CONTENTSHARE";
private static final String KEY_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME";
private static final String KEY_FUNCTIONS_EXTENSION_VERSION = "FUNCTIONS_EXTENSION_VERSION";
private static final String ACCOUNT_NAME_SEGMENT = "AccountName=";
private static final String ACCOUNT_KEY_SEGMENT = "AccountKey=";
private static class FunctionAppResource {
Map<String, AppSetting> appSettings;
String accountName;
String accountKey;
StorageAccount storageAccount;
}
private static FunctionAppResource getStorageAccount(StorageManager storageManager, FunctionApp functionApp) {
FunctionAppResource resource = new FunctionAppResource();
resource.appSettings = functionApp.getAppSettings();
String storageAccountConnectionString = resource.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value();
String[] segments = storageAccountConnectionString.split(";");
for (String segment : segments) {
if (segment.startsWith(ACCOUNT_NAME_SEGMENT)) {
resource.accountName = segment.substring(ACCOUNT_NAME_SEGMENT.length());
} else if (segment.startsWith(ACCOUNT_KEY_SEGMENT)) {
resource.accountKey = segment.substring(ACCOUNT_KEY_SEGMENT.length());
}
}
if (resource.accountName != null) {
PagedIterable<StorageAccount> storageAccounts = storageManager.storageAccounts().list();
for (StorageAccount storageAccount : storageAccounts) {
if (resource.accountName.equals(storageAccount.name())) {
resource.storageAccount = storageAccount;
break;
}
}
}
return resource;
}
} | class FunctionAppsTests extends AppServiceTest {
private String rgName1 = "";
private String rgName2 = "";
private String webappName1 = "";
private String webappName2 = "";
private String webappName3 = "";
private String appServicePlanName1 = "";
private String appServicePlanName2 = "";
private String storageAccountName1 = "";
protected StorageManager storageManager;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
webappName1 = generateRandomResourceName("java-func-", 20);
webappName2 = generateRandomResourceName("java-func-", 20);
webappName3 = generateRandomResourceName("java-func-", 20);
appServicePlanName1 = generateRandomResourceName("java-asp-", 20);
appServicePlanName2 = generateRandomResourceName("java-asp-", 20);
storageAccountName1 = generateRandomResourceName("javastore", 20);
rgName1 = generateRandomResourceName("javacsmrg", 20);
rgName2 = generateRandomResourceName("javacsmrg", 20);
storageManager = buildManager(StorageManager.class, httpPipeline, profile);
super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
if (rgName1 != null) {
resourceManager.resourceGroups().beginDeleteByName(rgName1);
}
if (rgName2 != null) {
try {
resourceManager.resourceGroups().beginDeleteByName(rgName2);
} catch (ManagementException e) {
}
}
}
@Test
private static final String FUNCTION_APP_PACKAGE_URL =
"https:
@Test
public void canCRUDLinuxFunctionApp() throws Exception {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_EAST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
Assertions.assertTrue(plan1.innerModel().reserved());
Assertions
.assertTrue(
Arrays
.asList(functionApp1.innerModel().kind().split(Pattern.quote(",")))
.containsAll(Arrays.asList("linux", "functionapp")));
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName1)
.withNewLinuxAppServicePlan(PricingTier.STANDARD_S1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp2);
assertLinuxJava(functionApp2, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp2.alwaysOn());
AppServicePlan plan2 = appServiceManager.appServicePlans().getById(functionApp2.appServicePlanId());
Assertions.assertNotNull(plan2);
Assertions.assertEquals(PricingTier.STANDARD_S1, plan2.pricingTier());
Assertions.assertTrue(plan2.innerModel().reserved());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withExistingLinuxAppServicePlan(plan2)
.withExistingResourceGroup(rgName1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp3);
assertLinuxJava(functionApp3, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp3.alwaysOn());
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(3, TestUtilities.getSize(functionApps));
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp2.resourceGroupName(), functionApp2.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp3.resourceGroupName(), functionApp3.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
public void canCRUDLinuxFunctionAppPremium() {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"), plan1.pricingTier());
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
@Disabled("Need container registry")
public void canCRUDLinuxFunctionAppPremiumDocker() {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withPrivateRegistryImage(
"weidxuregistry.azurecr.io/az-func-java:v1", "https:
.withCredentials("weidxuregistry", "PASSWORD")
.withRuntime("java")
.withRuntimeVersion("~3")
.create();
Assertions.assertFalse(functionApp1.alwaysOn());
if (!isPlaybackMode()) {
functionApp1.zipDeploy(new File(FunctionAppsTests.class.getResource("/java-functions.zip").getPath()));
}
}
@Test
public void canCRUDLinuxFunctionAppJava11() throws Exception {
rgName2 = null;
String runtimeVersion = "~4";
FunctionApp functionApp1 = appServiceManager.functionApps().define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_11)
.withRuntimeVersion(runtimeVersion)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_11, runtimeVersion);
assertRunning(functionApp1);
}
@Test
public void canCRUDLinuxFunctionAppJava17() throws Exception {
rgName2 = null;
FunctionApp functionApp1 = appServiceManager.functionApps().define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_17)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_17);
assertRunning(functionApp1);
}
@Test
public void canCreateAndUpdateFunctionAppWithContainerSize() {
rgName2 = null;
webappName1 = generateRandomResourceName("java-function-", 20);
String functionDeploymentSlotName = generateRandomResourceName("fds", 15);
FunctionApp functionApp1 = appServiceManager.functionApps()
.define(webappName1)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName1)
.withContainerSize(512)
.create();
FunctionDeploymentSlot functionDeploymentSlot = functionApp1.deploymentSlots()
.define(functionDeploymentSlotName)
.withConfigurationFromParent()
.withContainerSize(256)
.create();
Assertions.assertEquals(512, functionApp1.containerSize());
Assertions.assertEquals(256, functionDeploymentSlot.containerSize());
functionApp1.update()
.withContainerSize(320)
.apply();
functionApp1.refresh();
Assertions.assertEquals(320, functionApp1.containerSize());
Assertions.assertEquals(256, functionDeploymentSlot.containerSize());
functionDeploymentSlot.update()
.withContainerSize(128)
.apply();
functionDeploymentSlot.refresh();
Assertions.assertEquals(128, functionDeploymentSlot.containerSize());
}
private void assertRunning(FunctionApp functionApp) {
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(1));
String name = "linux_function_app";
Response<String> response = curl("https:
+ "/api/HttpTrigger-Java?name=" + name);
Assertions.assertEquals(200, response.getStatusCode());
String body = response.getValue();
Assertions.assertNotNull(body);
Assertions.assertTrue(body.contains("Hello, " + name));
}
}
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack) {
return assertLinuxJava(functionApp, stack, null);
}
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack,
String runtimeVersion) {
Assertions.assertEquals(stack.getLinuxFxVersion(), functionApp.linuxFxVersion());
Assertions
.assertTrue(
Arrays
.asList(functionApp.innerModel().kind().split(Pattern.quote(",")))
.containsAll(Arrays.asList("linux", "functionapp")));
Assertions.assertTrue(functionApp.innerModel().reserved());
Map<String, AppSetting> appSettings = functionApp.getAppSettings();
Assertions.assertNotNull(appSettings);
Assertions.assertNotNull(appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE));
Assertions.assertEquals(
stack.runtime(),
appSettings.get(KEY_FUNCTIONS_WORKER_RUNTIME).value());
Assertions.assertEquals(
runtimeVersion == null ? stack.version() : runtimeVersion,
appSettings.get(KEY_FUNCTIONS_EXTENSION_VERSION).value());
return appSettings;
}
private static final String KEY_AZURE_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String KEY_CONTENT_AZURE_FILE_CONNECTION_STRING = "WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String KEY_CONTENT_SHARE = "WEBSITE_CONTENTSHARE";
private static final String KEY_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME";
private static final String KEY_FUNCTIONS_EXTENSION_VERSION = "FUNCTIONS_EXTENSION_VERSION";
private static final String ACCOUNT_NAME_SEGMENT = "AccountName=";
private static final String ACCOUNT_KEY_SEGMENT = "AccountKey=";
private static class FunctionAppResource {
Map<String, AppSetting> appSettings;
String accountName;
String accountKey;
StorageAccount storageAccount;
}
private static FunctionAppResource getStorageAccount(StorageManager storageManager, FunctionApp functionApp) {
FunctionAppResource resource = new FunctionAppResource();
resource.appSettings = functionApp.getAppSettings();
String storageAccountConnectionString = resource.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value();
String[] segments = storageAccountConnectionString.split(";");
for (String segment : segments) {
if (segment.startsWith(ACCOUNT_NAME_SEGMENT)) {
resource.accountName = segment.substring(ACCOUNT_NAME_SEGMENT.length());
} else if (segment.startsWith(ACCOUNT_KEY_SEGMENT)) {
resource.accountKey = segment.substring(ACCOUNT_KEY_SEGMENT.length());
}
}
if (resource.accountName != null) {
PagedIterable<StorageAccount> storageAccounts = storageManager.storageAccounts().list();
for (StorageAccount storageAccount : storageAccounts) {
if (resource.accountName.equals(storageAccount.name())) {
resource.storageAccount = storageAccount;
break;
}
}
}
return resource;
}
} |
This judgment is fixed to `if (!isPlaybackMode())` | public void canCRUDFunctionApp() throws Exception {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName1)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertEquals(Region.US_WEST, functionApp1.region());
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_WEST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
String accountKey = functionAppResource1.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withExistingAppServicePlan(plan1)
.withNewResourceGroup(rgName2)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp2);
Assertions.assertEquals(Region.US_WEST, functionApp2.region());
Assertions.assertFalse(functionApp2.alwaysOn());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withRegion(Region.US_WEST)
.withExistingResourceGroup(rgName2)
.withNewAppServicePlan(PricingTier.BASIC_B1)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp3);
Assertions.assertEquals(Region.US_WEST, functionApp3.region());
Assertions.assertTrue(functionApp3.alwaysOn());
FunctionAppResource functionAppResource3 = getStorageAccount(storageManager, functionApp3);
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_SHARE));
accountKey = functionAppResource3.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource3.storageAccount.getKeys().get(0).value(), functionAppResource3.accountKey);
}
FunctionApp functionApp = appServiceManager.functionApps().getByResourceGroup(rgName1, functionApp1.name());
Assertions.assertEquals(functionApp1.id(), functionApp.id());
functionApp = appServiceManager.functionApps().getById(functionApp2.id());
Assertions.assertEquals(functionApp2.name(), functionApp.name());
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName2);
Assertions.assertEquals(2, TestUtilities.getSize(functionApps));
functionApp2.update().withNewStorageAccount(storageAccountName1, StorageAccountSkuType.STANDARD_LRS).apply();
Assertions.assertEquals(storageAccountName1, functionApp2.storageAccount().name());
FunctionAppResource functionAppResource2 = getStorageAccount(storageManager, functionApp2);
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource2.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource2.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions.assertEquals(storageAccountName1, functionAppResource2.storageAccount.name());
accountKey = functionAppResource2.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource2.storageAccount.getKeys().get(0).value(), functionAppResource2.accountKey);
}
int numStorageAccountBefore =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
functionApp1.update().withAppSetting("newKey", "newValue").apply();
int numStorageAccountAfter =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
Assertions.assertEquals(numStorageAccountBefore, numStorageAccountAfter);
FunctionAppResource functionAppResource1Updated = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1Updated.appSettings.containsKey("newKey"));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1Updated.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_SHARE).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_SHARE).value());
Assertions
.assertEquals(
functionAppResource1.storageAccount.name(), functionAppResource1Updated.storageAccount.name());
functionApp3.update().withNewAppServicePlan(PricingTier.STANDARD_S2).apply();
Assertions.assertNotEquals(functionApp3.appServicePlanId(), functionApp1.appServicePlanId());
Assertions.assertTrue(functionApp3.alwaysOn());
} | if (!"REDACTED".equals(accountKey)) { | public void canCRUDFunctionApp() throws Exception {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName1)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertEquals(Region.US_WEST, functionApp1.region());
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_WEST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withExistingAppServicePlan(plan1)
.withNewResourceGroup(rgName2)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp2);
Assertions.assertEquals(Region.US_WEST, functionApp2.region());
Assertions.assertFalse(functionApp2.alwaysOn());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withRegion(Region.US_WEST)
.withExistingResourceGroup(rgName2)
.withNewAppServicePlan(PricingTier.BASIC_B1)
.withExistingStorageAccount(functionApp1.storageAccount())
.create();
Assertions.assertNotNull(functionApp3);
Assertions.assertEquals(Region.US_WEST, functionApp3.region());
Assertions.assertTrue(functionApp3.alwaysOn());
FunctionAppResource functionAppResource3 = getStorageAccount(storageManager, functionApp3);
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertFalse(functionAppResource3.appSettings.containsKey(KEY_CONTENT_SHARE));
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource3.storageAccount.getKeys().get(0).value(), functionAppResource3.accountKey);
}
FunctionApp functionApp = appServiceManager.functionApps().getByResourceGroup(rgName1, functionApp1.name());
Assertions.assertEquals(functionApp1.id(), functionApp.id());
functionApp = appServiceManager.functionApps().getById(functionApp2.id());
Assertions.assertEquals(functionApp2.name(), functionApp.name());
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName2);
Assertions.assertEquals(2, TestUtilities.getSize(functionApps));
functionApp2.update().withNewStorageAccount(storageAccountName1, StorageAccountSkuType.STANDARD_LRS).apply();
Assertions.assertEquals(storageAccountName1, functionApp2.storageAccount().name());
FunctionAppResource functionAppResource2 = getStorageAccount(storageManager, functionApp2);
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource2.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource2.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource2.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions.assertEquals(storageAccountName1, functionAppResource2.storageAccount.name());
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource2.storageAccount.getKeys().get(0).value(), functionAppResource2.accountKey);
}
int numStorageAccountBefore =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
functionApp1.update().withAppSetting("newKey", "newValue").apply();
int numStorageAccountAfter =
TestUtilities.getSize(storageManager.storageAccounts().listByResourceGroup(rgName1));
Assertions.assertEquals(numStorageAccountBefore, numStorageAccountAfter);
FunctionAppResource functionAppResource1Updated = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1Updated.appSettings.containsKey("newKey"));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1Updated.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_CONTENT_SHARE).value(),
functionAppResource1Updated.appSettings.get(KEY_CONTENT_SHARE).value());
Assertions
.assertEquals(
functionAppResource1.storageAccount.name(), functionAppResource1Updated.storageAccount.name());
functionApp3.update().withNewAppServicePlan(PricingTier.STANDARD_S2).apply();
Assertions.assertNotEquals(functionApp3.appServicePlanId(), functionApp1.appServicePlanId());
Assertions.assertTrue(functionApp3.alwaysOn());
} | class FunctionAppsTests extends AppServiceTest {
private String rgName1 = "";
private String rgName2 = "";
private String webappName1 = "";
private String webappName2 = "";
private String webappName3 = "";
private String appServicePlanName1 = "";
private String appServicePlanName2 = "";
private String storageAccountName1 = "";
protected StorageManager storageManager;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
webappName1 = generateRandomResourceName("java-func-", 20);
webappName2 = generateRandomResourceName("java-func-", 20);
webappName3 = generateRandomResourceName("java-func-", 20);
appServicePlanName1 = generateRandomResourceName("java-asp-", 20);
appServicePlanName2 = generateRandomResourceName("java-asp-", 20);
storageAccountName1 = generateRandomResourceName("javastore", 20);
rgName1 = generateRandomResourceName("javacsmrg", 20);
rgName2 = generateRandomResourceName("javacsmrg", 20);
storageManager = buildManager(StorageManager.class, httpPipeline, profile);
super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
if (rgName1 != null) {
resourceManager.resourceGroups().beginDeleteByName(rgName1);
}
if (rgName2 != null) {
try {
resourceManager.resourceGroups().beginDeleteByName(rgName2);
} catch (ManagementException e) {
}
}
}
@Test
private static final String FUNCTION_APP_PACKAGE_URL =
"https:
@Test
public void canCRUDLinuxFunctionApp() throws Exception {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_EAST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
Assertions.assertTrue(plan1.innerModel().reserved());
Assertions
.assertTrue(
Arrays
.asList(functionApp1.innerModel().kind().split(Pattern.quote(",")))
.containsAll(Arrays.asList("linux", "functionapp")));
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
String accountKey = functionAppResource1.storageAccount.getKeys().get(0).value();
if (!"REDACTED".equals(accountKey)) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName1)
.withNewLinuxAppServicePlan(PricingTier.STANDARD_S1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp2);
assertLinuxJava(functionApp2, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp2.alwaysOn());
AppServicePlan plan2 = appServiceManager.appServicePlans().getById(functionApp2.appServicePlanId());
Assertions.assertNotNull(plan2);
Assertions.assertEquals(PricingTier.STANDARD_S1, plan2.pricingTier());
Assertions.assertTrue(plan2.innerModel().reserved());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withExistingLinuxAppServicePlan(plan2)
.withExistingResourceGroup(rgName1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp3);
assertLinuxJava(functionApp3, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp3.alwaysOn());
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(3, TestUtilities.getSize(functionApps));
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp2.resourceGroupName(), functionApp2.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp3.resourceGroupName(), functionApp3.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
public void canCRUDLinuxFunctionAppPremium() {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"), plan1.pricingTier());
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
@Disabled("Need container registry")
public void canCRUDLinuxFunctionAppPremiumDocker() {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withPrivateRegistryImage(
"weidxuregistry.azurecr.io/az-func-java:v1", "https:
.withCredentials("weidxuregistry", "PASSWORD")
.withRuntime("java")
.withRuntimeVersion("~3")
.create();
Assertions.assertFalse(functionApp1.alwaysOn());
if (!isPlaybackMode()) {
functionApp1.zipDeploy(new File(FunctionAppsTests.class.getResource("/java-functions.zip").getPath()));
}
}
@Test
public void canCRUDLinuxFunctionAppJava11() throws Exception {
rgName2 = null;
String runtimeVersion = "~4";
FunctionApp functionApp1 = appServiceManager.functionApps().define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_11)
.withRuntimeVersion(runtimeVersion)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_11, runtimeVersion);
assertRunning(functionApp1);
}
@Test
public void canCRUDLinuxFunctionAppJava17() throws Exception {
rgName2 = null;
FunctionApp functionApp1 = appServiceManager.functionApps().define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_17)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_17);
assertRunning(functionApp1);
}
@Test
public void canCreateAndUpdateFunctionAppWithContainerSize() {
rgName2 = null;
webappName1 = generateRandomResourceName("java-function-", 20);
String functionDeploymentSlotName = generateRandomResourceName("fds", 15);
FunctionApp functionApp1 = appServiceManager.functionApps()
.define(webappName1)
.withRegion(Region.US_WEST)
.withNewResourceGroup(rgName1)
.withContainerSize(512)
.create();
FunctionDeploymentSlot functionDeploymentSlot = functionApp1.deploymentSlots()
.define(functionDeploymentSlotName)
.withConfigurationFromParent()
.withContainerSize(256)
.create();
Assertions.assertEquals(512, functionApp1.containerSize());
Assertions.assertEquals(256, functionDeploymentSlot.containerSize());
functionApp1.update()
.withContainerSize(320)
.apply();
functionApp1.refresh();
Assertions.assertEquals(320, functionApp1.containerSize());
Assertions.assertEquals(256, functionDeploymentSlot.containerSize());
functionDeploymentSlot.update()
.withContainerSize(128)
.apply();
functionDeploymentSlot.refresh();
Assertions.assertEquals(128, functionDeploymentSlot.containerSize());
}
private void assertRunning(FunctionApp functionApp) {
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(1));
String name = "linux_function_app";
Response<String> response = curl("https:
+ "/api/HttpTrigger-Java?name=" + name);
Assertions.assertEquals(200, response.getStatusCode());
String body = response.getValue();
Assertions.assertNotNull(body);
Assertions.assertTrue(body.contains("Hello, " + name));
}
}
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack) {
return assertLinuxJava(functionApp, stack, null);
}
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack,
String runtimeVersion) {
Assertions.assertEquals(stack.getLinuxFxVersion(), functionApp.linuxFxVersion());
Assertions
.assertTrue(
Arrays
.asList(functionApp.innerModel().kind().split(Pattern.quote(",")))
.containsAll(Arrays.asList("linux", "functionapp")));
Assertions.assertTrue(functionApp.innerModel().reserved());
Map<String, AppSetting> appSettings = functionApp.getAppSettings();
Assertions.assertNotNull(appSettings);
Assertions.assertNotNull(appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE));
Assertions.assertEquals(
stack.runtime(),
appSettings.get(KEY_FUNCTIONS_WORKER_RUNTIME).value());
Assertions.assertEquals(
runtimeVersion == null ? stack.version() : runtimeVersion,
appSettings.get(KEY_FUNCTIONS_EXTENSION_VERSION).value());
return appSettings;
}
private static final String KEY_AZURE_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String KEY_CONTENT_AZURE_FILE_CONNECTION_STRING = "WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String KEY_CONTENT_SHARE = "WEBSITE_CONTENTSHARE";
private static final String KEY_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME";
private static final String KEY_FUNCTIONS_EXTENSION_VERSION = "FUNCTIONS_EXTENSION_VERSION";
private static final String ACCOUNT_NAME_SEGMENT = "AccountName=";
private static final String ACCOUNT_KEY_SEGMENT = "AccountKey=";
private static class FunctionAppResource {
Map<String, AppSetting> appSettings;
String accountName;
String accountKey;
StorageAccount storageAccount;
}
private static FunctionAppResource getStorageAccount(StorageManager storageManager, FunctionApp functionApp) {
FunctionAppResource resource = new FunctionAppResource();
resource.appSettings = functionApp.getAppSettings();
String storageAccountConnectionString = resource.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value();
String[] segments = storageAccountConnectionString.split(";");
for (String segment : segments) {
if (segment.startsWith(ACCOUNT_NAME_SEGMENT)) {
resource.accountName = segment.substring(ACCOUNT_NAME_SEGMENT.length());
} else if (segment.startsWith(ACCOUNT_KEY_SEGMENT)) {
resource.accountKey = segment.substring(ACCOUNT_KEY_SEGMENT.length());
}
}
if (resource.accountName != null) {
PagedIterable<StorageAccount> storageAccounts = storageManager.storageAccounts().list();
for (StorageAccount storageAccount : storageAccounts) {
if (resource.accountName.equals(storageAccount.name())) {
resource.storageAccount = storageAccount;
break;
}
}
}
return resource;
}
} | class FunctionAppsTests extends AppServiceTest {
private String rgName1 = "";
private String rgName2 = "";
private String webappName1 = "";
private String webappName2 = "";
private String webappName3 = "";
private String appServicePlanName1 = "";
private String appServicePlanName2 = "";
private String storageAccountName1 = "";
protected StorageManager storageManager;
@Override
protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
webappName1 = generateRandomResourceName("java-func-", 20);
webappName2 = generateRandomResourceName("java-func-", 20);
webappName3 = generateRandomResourceName("java-func-", 20);
appServicePlanName1 = generateRandomResourceName("java-asp-", 20);
appServicePlanName2 = generateRandomResourceName("java-asp-", 20);
storageAccountName1 = generateRandomResourceName("javastore", 20);
rgName1 = generateRandomResourceName("javacsmrg", 20);
rgName2 = generateRandomResourceName("javacsmrg", 20);
storageManager = buildManager(StorageManager.class, httpPipeline, profile);
super.initializeClients(httpPipeline, profile);
}
@Override
protected void cleanUpResources() {
if (rgName1 != null) {
resourceManager.resourceGroups().beginDeleteByName(rgName1);
}
if (rgName2 != null) {
try {
resourceManager.resourceGroups().beginDeleteByName(rgName2);
} catch (ManagementException e) {
}
}
}
@Test
private static final String FUNCTION_APP_PACKAGE_URL =
"https:
@Test
public void canCRUDLinuxFunctionApp() throws Exception {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxConsumptionPlan()
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(Region.US_EAST, plan1.region());
Assertions.assertEquals(new PricingTier(SkuName.DYNAMIC.toString(), "Y1"), plan1.pricingTier());
Assertions.assertTrue(plan1.innerModel().reserved());
Assertions
.assertTrue(
Arrays
.asList(functionApp1.innerModel().kind().split(Pattern.quote(",")))
.containsAll(Arrays.asList("linux", "functionapp")));
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
Assertions
.assertEquals(
functionAppResource1.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value(),
functionAppResource1.appSettings.get(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING).value());
if (!isPlaybackMode()) {
Assertions
.assertEquals(
functionAppResource1.storageAccount.getKeys().get(0).value(), functionAppResource1.accountKey);
}
PagedIterable<FunctionAppBasic> functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(1, TestUtilities.getSize(functionApps));
FunctionApp functionApp2 =
appServiceManager
.functionApps()
.define(webappName2)
.withRegion(Region.US_EAST)
.withExistingResourceGroup(rgName1)
.withNewLinuxAppServicePlan(PricingTier.STANDARD_S1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp2);
assertLinuxJava(functionApp2, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp2.alwaysOn());
AppServicePlan plan2 = appServiceManager.appServicePlans().getById(functionApp2.appServicePlanId());
Assertions.assertNotNull(plan2);
Assertions.assertEquals(PricingTier.STANDARD_S1, plan2.pricingTier());
Assertions.assertTrue(plan2.innerModel().reserved());
FunctionApp functionApp3 =
appServiceManager
.functionApps()
.define(webappName3)
.withExistingLinuxAppServicePlan(plan2)
.withExistingResourceGroup(rgName1)
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp3);
assertLinuxJava(functionApp3, FunctionRuntimeStack.JAVA_8);
Assertions.assertTrue(functionApp3.alwaysOn());
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
functionApps = appServiceManager.functionApps().listByResourceGroup(rgName1);
Assertions.assertEquals(3, TestUtilities.getSize(functionApps));
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp2.resourceGroupName(), functionApp2.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
functions =
appServiceManager.functionApps().listFunctions(functionApp3.resourceGroupName(), functionApp3.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
public void canCRUDLinuxFunctionAppPremium() {
rgName2 = null;
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withBuiltInImage(FunctionRuntimeStack.JAVA_8)
.withHttpsOnly(true)
.withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
.create();
Assertions.assertNotNull(functionApp1);
Assertions.assertFalse(functionApp1.alwaysOn());
AppServicePlan plan1 = appServiceManager.appServicePlans().getById(functionApp1.appServicePlanId());
Assertions.assertNotNull(plan1);
Assertions.assertEquals(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"), plan1.pricingTier());
assertLinuxJava(functionApp1, FunctionRuntimeStack.JAVA_8);
FunctionAppResource functionAppResource1 = getStorageAccount(storageManager, functionApp1);
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_AZURE_FILE_CONNECTION_STRING));
Assertions.assertTrue(functionAppResource1.appSettings.containsKey(KEY_CONTENT_SHARE));
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(3));
}
PagedIterable<FunctionEnvelope> functions =
appServiceManager.functionApps().listFunctions(functionApp1.resourceGroupName(), functionApp1.name());
Assertions.assertEquals(1, TestUtilities.getSize(functions));
}
@Test
@Disabled("Need container registry")
public void canCRUDLinuxFunctionAppPremiumDocker() {
FunctionApp functionApp1 =
appServiceManager
.functionApps()
.define(webappName1)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName1)
.withNewLinuxAppServicePlan(new PricingTier(SkuName.ELASTIC_PREMIUM.toString(), "EP1"))
.withPrivateRegistryImage(
"weidxuregistry.azurecr.io/az-func-java:v1", "https:
.withCredentials("weidxuregistry", "PASSWORD")
.withRuntime("java")
.withRuntimeVersion("~3")
.create();
Assertions.assertFalse(functionApp1.alwaysOn());
if (!isPlaybackMode()) {
functionApp1.zipDeploy(new File(FunctionAppsTests.class.getResource("/java-functions.zip").getPath()));
}
}
@Test
public void canCRUDLinuxFunctionAppJava11() throws Exception {
    // Only one resource group is used by this scenario.
    rgName2 = null;
    final String runtimeVersion = "~4";
    // Java 11 on a Linux consumption plan with an explicit extension version.
    FunctionApp javaApp = appServiceManager.functionApps()
        .define(webappName1)
        .withRegion(Region.US_EAST)
        .withNewResourceGroup(rgName1)
        .withNewLinuxConsumptionPlan()
        .withBuiltInImage(FunctionRuntimeStack.JAVA_11)
        .withRuntimeVersion(runtimeVersion)
        .withHttpsOnly(true)
        .withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
        .create();
    Assertions.assertNotNull(javaApp);
    assertLinuxJava(javaApp, FunctionRuntimeStack.JAVA_11, runtimeVersion);
    assertRunning(javaApp);
}
@Test
public void canCRUDLinuxFunctionAppJava17() throws Exception {
    // Only one resource group is used by this scenario.
    rgName2 = null;
    // Java 17 on a Linux consumption plan, using the stack's default extension version.
    FunctionApp javaApp = appServiceManager.functionApps()
        .define(webappName1)
        .withRegion(Region.US_EAST)
        .withNewResourceGroup(rgName1)
        .withNewLinuxConsumptionPlan()
        .withBuiltInImage(FunctionRuntimeStack.JAVA_17)
        .withHttpsOnly(true)
        .withAppSetting("WEBSITE_RUN_FROM_PACKAGE", FUNCTION_APP_PACKAGE_URL)
        .create();
    Assertions.assertNotNull(javaApp);
    assertLinuxJava(javaApp, FunctionRuntimeStack.JAVA_17);
    assertRunning(javaApp);
}
@Test
public void canCreateAndUpdateFunctionAppWithContainerSize() {
    rgName2 = null;
    webappName1 = generateRandomResourceName("java-function-", 20);
    String slotName = generateRandomResourceName("fds", 15);
    // Create an app with a 512 MB container and a slot overriding it to 256 MB.
    FunctionApp app = appServiceManager.functionApps()
        .define(webappName1)
        .withRegion(Region.US_WEST)
        .withNewResourceGroup(rgName1)
        .withContainerSize(512)
        .create();
    FunctionDeploymentSlot slot = app.deploymentSlots()
        .define(slotName)
        .withConfigurationFromParent()
        .withContainerSize(256)
        .create();
    Assertions.assertEquals(512, app.containerSize());
    Assertions.assertEquals(256, slot.containerSize());
    // Updating the parent app must not affect the slot's container size.
    app.update()
        .withContainerSize(320)
        .apply();
    app.refresh();
    Assertions.assertEquals(320, app.containerSize());
    Assertions.assertEquals(256, slot.containerSize());
    // The slot can be resized independently as well.
    slot.update()
        .withContainerSize(128)
        .apply();
    slot.refresh();
    Assertions.assertEquals(128, slot.containerSize());
}
private void assertRunning(FunctionApp functionApp) {
if (!isPlaybackMode()) {
ResourceManagerUtils.sleep(Duration.ofMinutes(1));
String name = "linux_function_app";
Response<String> response = curl("https:
+ "/api/HttpTrigger-Java?name=" + name);
Assertions.assertEquals(200, response.getStatusCode());
String body = response.getValue();
Assertions.assertNotNull(body);
Assertions.assertTrue(body.contains("Hello, " + name));
}
}
/**
 * Asserts the app is a Linux function app on the given built-in Java stack,
 * expecting the stack's default extension version.
 */
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack) {
    return assertLinuxJava(functionApp, stack, null);
}

/**
 * Asserts Linux kind/reserved flags and the Java runtime app settings.
 *
 * @param runtimeVersion expected FUNCTIONS_EXTENSION_VERSION, or {@code null} to expect the stack default.
 * @return the app settings for further assertions by the caller.
 */
private static Map<String, AppSetting> assertLinuxJava(FunctionApp functionApp, FunctionRuntimeStack stack,
    String runtimeVersion) {
    Assertions.assertEquals(stack.getLinuxFxVersion(), functionApp.linuxFxVersion());
    // The "kind" value is a comma-separated list and must mention both markers.
    Assertions.assertTrue(
        Arrays.asList(functionApp.innerModel().kind().split(Pattern.quote(",")))
            .containsAll(Arrays.asList("linux", "functionapp")));
    Assertions.assertTrue(functionApp.innerModel().reserved());
    Map<String, AppSetting> appSettings = functionApp.getAppSettings();
    Assertions.assertNotNull(appSettings);
    Assertions.assertNotNull(appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE));
    Assertions.assertEquals(stack.runtime(), appSettings.get(KEY_FUNCTIONS_WORKER_RUNTIME).value());
    String expectedExtensionVersion = runtimeVersion == null ? stack.version() : runtimeVersion;
    Assertions.assertEquals(expectedExtensionVersion, appSettings.get(KEY_FUNCTIONS_EXTENSION_VERSION).value());
    return appSettings;
}
// App-setting keys inspected by these tests.
private static final String KEY_AZURE_WEB_JOBS_STORAGE = "AzureWebJobsStorage";
private static final String KEY_CONTENT_AZURE_FILE_CONNECTION_STRING = "WEBSITE_CONTENTAZUREFILECONNECTIONSTRING";
private static final String KEY_CONTENT_SHARE = "WEBSITE_CONTENTSHARE";
private static final String KEY_FUNCTIONS_WORKER_RUNTIME = "FUNCTIONS_WORKER_RUNTIME";
private static final String KEY_FUNCTIONS_EXTENSION_VERSION = "FUNCTIONS_EXTENSION_VERSION";
// Segment prefixes within an "...;AccountName=x;AccountKey=y;..." storage connection string.
private static final String ACCOUNT_NAME_SEGMENT = "AccountName=";
private static final String ACCOUNT_KEY_SEGMENT = "AccountKey=";
// Holder for the storage details resolved from a function app's settings (see getStorageAccount).
private static class FunctionAppResource {
// Complete app settings of the function app.
Map<String, AppSetting> appSettings;
// Parsed from the AzureWebJobsStorage connection string; may stay null if absent.
String accountName;
String accountKey;
// Storage account in the subscription whose name matches accountName; may stay null.
StorageAccount storageAccount;
}
/**
 * Resolves the storage account backing a function app by parsing its
 * AzureWebJobsStorage connection string and matching the account name
 * against the accounts visible to the given storage manager.
 */
private static FunctionAppResource getStorageAccount(StorageManager storageManager, FunctionApp functionApp) {
    FunctionAppResource resource = new FunctionAppResource();
    resource.appSettings = functionApp.getAppSettings();
    String connectionString = resource.appSettings.get(KEY_AZURE_WEB_JOBS_STORAGE).value();
    // Connection strings look like "...;AccountName=foo;AccountKey=bar;...".
    for (String segment : connectionString.split(";")) {
        if (segment.startsWith(ACCOUNT_NAME_SEGMENT)) {
            resource.accountName = segment.substring(ACCOUNT_NAME_SEGMENT.length());
        } else if (segment.startsWith(ACCOUNT_KEY_SEGMENT)) {
            resource.accountKey = segment.substring(ACCOUNT_KEY_SEGMENT.length());
        }
    }
    if (resource.accountName != null) {
        // Linear scan over the subscription's accounts; stop at the first name match.
        for (StorageAccount candidate : storageManager.storageAccounts().list()) {
            if (resource.accountName.equals(candidate.name())) {
                resource.storageAccount = candidate;
                break;
            }
        }
    }
    return resource;
}
} |
when would I not want the body pretty printed? I guess we're trying to save lines in log files? | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
// Null options: fall back to the environment-configured detail level, the library's
// default header/query allow lists, compact (non-pretty) bodies, and default loggers.
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
// Allow lists are lower-cased once here so later lookups are case-insensitive.
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
// Pretty printing defaults off: expanded JSON multiplies log volume (see review note above).
this.prettyPrintBody = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
// Options supplied: honor them, substituting default loggers where unset.
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | this.prettyPrintBody = false; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
// Duplicate copy of the constructor body (dataset "after" cell); see the copy above.
// Null options: environment-driven detail level, default allow lists, compact bodies.
if (httpLogOptions == null) {
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
// Options supplied: honor them, defaulting unset loggers.
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
// Shared mapper used only for pretty printing logged JSON bodies.
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
// Bodies at or above 16 KB are never logged (see shouldBodyBeLogged).
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
// Bound on the per-caller-method logger cache; exceeding it wipes the cache wholesale.
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
// Lower-cased header / query-parameter names whose values may appear unredacted.
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
// NOTE(review): this Javadoc documents the constructor, which is not adjacent in this
// extract — verify placement against the original file.
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Fast path: logging disabled entirely, just forward the call.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    String callerMethod = (String) context.getData("caller-method").orElse("");
    ClientLogger methodLogger = getOrCreateMethodLogger(callerMethod);
    long callStartNanos = System.nanoTime();
    // Log the request, run the rest of the pipeline, then log the response with elapsed time.
    return requestLogger.logRequest(methodLogger, getRequestLoggingOptions(context))
        .then(next.process())
        .flatMap(response -> responseLogger.logResponse(methodLogger,
            getResponseLoggingOptions(response, callStartNanos, context)))
        .doOnError(throwable -> methodLogger.warning("<-- HTTP FAILED: ", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: logging disabled entirely, just forward the call.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    String callerMethod = (String) context.getData("caller-method").orElse("");
    ClientLogger methodLogger = getOrCreateMethodLogger(callerMethod);
    long callStartNanos = System.nanoTime();
    requestLogger.logRequestSync(methodLogger, getRequestLoggingOptions(context));
    try {
        HttpResponse response = next.processSync();
        if (response == null) {
            return null;
        }
        // The response logger may wrap the response so the body can be logged on consumption.
        return responseLogger.logResponseSync(methodLogger,
            getResponseLoggingOptions(response, callStartNanos, context));
    } catch (RuntimeException e) {
        methodLogger.warning("<-- HTTP FAILED: ", e);
        throw methodLogger.logExceptionAsWarning(e);
    }
}
/** Bundles the request plus per-call context metadata (retry count) for the request logger. */
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    Context callData = callContext.getContext();
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), callData, getRequestRetryCount(callData));
}

/** Bundles the response, elapsed call time, and per-call context metadata for the response logger. */
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    Context callData = callContext.getContext();
    Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    return new HttpResponseLoggingContext(httpResponse, elapsed, callData, getRequestRetryCount(callData));
}
// Default request logger: emits one structured "HTTP request" event, honoring the
// configured detail level and redaction allow lists. Kept byte-identical; ordering of
// addKeyValue calls determines the emitted log shape.
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
// Logging is synchronous; the Mono only sequences it inside a reactive pipeline.
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
// Builds and emits the event: method/URL (+try count), headers, then body or length.
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
// Headers require at least INFORMATIONAL verbosity in addition to the detail level.
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder);
}
// No body: log a zero content length and finish.
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
// Body is only logged when enabled, non-empty, under the size cap, and not octet-stream;
// logBody emits the final log message itself in that case.
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
// Logs the request body, choosing a strategy by content kind. Kept byte-identical:
// the Flux.using branch's teeing behavior is order-sensitive.
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
BinaryData data = request.getBodyAsBinaryData();
BinaryDataContent content = BinaryDataHelper.getContent(data);
if (content instanceof StringContent
|| content instanceof ByteBufferContent
|| content instanceof SerializableContent
|| content instanceof ByteArrayContent) {
// In-memory content can be logged directly.
logBody(logBuilder, logger, contentType, content.toString());
} else if (content instanceof InputStreamContent) {
// Streams are single-shot: buffer fully, replace the request body with the bytes,
// then log the buffered copy so the request remains sendable.
byte[] contentBytes = content.toBytes();
request.setBody(contentBytes);
logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
} else {
// Reactive content: tee every emitted buffer into a side stream and log the
// accumulated bytes when the replacement Flux terminates.
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
.doOnNext(byteBuffer -> {
try {
// duplicate() keeps the original buffer position intact for the network write.
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
}
}
// Emits the "HTTP request" event with the (optionally pretty-printed) body attached.
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data))
.log(REQUEST_LOG_MESSAGE);
}
// Default response logger: emits one structured "HTTP response" event. When body logging
// applies, it defers by returning a LoggingHttpResponse wrapper that logs on consumption.
// Kept byte-identical; ordering of the log* calls determines the emitted event shape.
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
@Override
public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return Mono.just(response);
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
// Wrapper emits the event (including the body) once the caller reads the body.
return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody));
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return Mono.just(response);
}
// Headers require INFORMATIONAL verbosity in addition to the configured detail level.
private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
}
}
// Status code, redacted URL, and elapsed milliseconds.
private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
LoggingEventBuilder logBuilder) {
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
.addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
}
}
// Content-Length is logged verbatim (as the raw header string) when present.
private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
}
}
@Override
public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
final HttpResponse response = loggingOptions.getHttpResponse();
if (!logger.canLogAtLevel(logLevel)) {
return response;
}
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
logContentLength(response, logBuilder);
logUrl(loggingOptions, response, logBuilder);
logHeaders(logger, response, logBuilder);
if (httpLogDetailLevel.shouldLogBody()) {
String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, response.getHeaders());
if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
return new LoggingHttpResponse(response, logBuilder, logger,
(int) contentLength, contentTypeHeader, prettyPrintBody);
}
}
logBuilder.log(RESPONSE_LOG_MESSAGE);
return response;
}
}
/**
 * Produces a loggable form of the URL where the value of every query parameter
 * not present in the allow list is replaced with {@code REDACTED}.
 *
 * @param url URL where the request is being sent.
 * @param allowedQueryParameterNames lower-cased query parameter names that may be logged as-is.
 * @return a URL string with disallowed query parameter values redacted.
 */
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String query = url.getQuery();
    if (CoreUtils.isNullOrEmpty(query)) {
        // No query string means nothing needs redacting.
        return url.toString();
    }
    UrlBuilder redacted = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
        String key = queryParam.getKey();
        boolean allowed = allowedQueryParameterNames.contains(key.toLowerCase(Locale.ROOT));
        redacted.addQueryParameter(key, allowed ? queryParam.getValue() : REDACTED_PLACEHOLDER);
    });
    return redacted.toString();
}
/**
 * Adds every header to the log event, substituting {@code REDACTED} for the value
 * of any header whose (lower-cased) name is not in the allow list.
 *
 * @param allowedHeaderNames lower-cased header names that may be logged as-is.
 * @param headers HTTP headers on the request or response.
 * @param logBuilder builder accumulating the structured log event.
 */
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder) {
    for (HttpHeader header : headers) {
        String headerName = header.getName();
        boolean allowed = allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT));
        logBuilder.addKeyValue(headerName, allowed ? header.getValue() : REDACTED_PLACEHOLDER);
    }
}
/**
 * Pretty prints the body when pretty printing is enabled and the Content-Type
 * indicates JSON; otherwise (or when parsing fails) returns the body unchanged.
 *
 * @param logger logger used to warn when the body cannot be parsed as JSON.
 * @param prettyPrintBody whether pretty printing was requested.
 * @param contentType Content-Type header value, possibly null.
 * @param body body of the request or response.
 * @return the pretty-printed JSON, or the original body.
 */
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    boolean looksLikeJson = contentType != null
        && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"));
    if (!prettyPrintBody || !looksLikeJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Best-effort: malformed JSON is logged as-is rather than failing the pipeline.
        logger.warning("Failed to pretty print JSON", e);
        return body;
    }
}
/**
 * Parses the Content-Length header into a numeric value, returning 0 when the
 * header is missing, empty, or not a valid number (a warning is logged).
 *
 * @param logger logger used to warn about an unparsable Content-Length value.
 * @param headers HTTP headers that are checked for Content-Length.
 * @return the parsed content length, or 0.
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        // FIX: dropped the dead NullPointerException from the multi-catch — the string is
        // guarded non-null above, and Long.parseLong(null) throws NumberFormatException anyway.
        logger.warning("Could not parse the HTTP header content-length: '{}'.", contentLengthString, e);
        return 0;
    }
}
/**
 * Decides whether a request/response body may be logged: the Content-Type must not
 * be "application/octet-stream" and the body must be non-empty and under 16 KB.
 *
 * @param contentTypeHeader Content-Type header value.
 * @param contentLength Content-Length header as a numeric value.
 * @return true when the body should be logged.
 */
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    if (ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader)) {
        return false;
    }
    return contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
}
/**
 * Reads the request retry count from the call context for inclusion in log events.
 * Returns null — meaning the count is not logged — when the value is absent or not
 * a valid integer (a warning is logged in the latter case).
 */
private static Integer getRequestRetryCount(Context context) {
    Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawRetryCount == null) {
        return null;
    }
    String retryCountText = rawRetryCount.toString();
    try {
        return Integer.valueOf(retryCountText);
    } catch (NumberFormatException ex) {
        LOGGER.warning("Could not parse the request retry count: '{}'.", rawRetryCount);
        return null;
    }
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
private static ClientLogger getOrCreateMethodLogger(String methodName) {
// Crude size bound: wipe the whole cache once it outgrows the limit. A concurrent
// caller may repopulate immediately; that only costs re-creating some loggers.
if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
CALLER_METHOD_LOGGER_CACHE.clear();
}
return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
/**
 * Maps a LogLevel to the matching event builder on the logger; anything that is not
 * ERROR, WARNING, or INFORMATIONAL (including VERBOSE) falls through to verbose.
 */
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    if (logLevel == LogLevel.ERROR) {
        return logger.atError();
    }
    if (logLevel == LogLevel.WARNING) {
        return logger.atWarning();
    }
    if (logLevel == LogLevel.INFORMATIONAL) {
        return logger.atInfo();
    }
    return logger.atVerbose();
}
// Response decorator that defers logging until the body is actually consumed, so the
// body bytes can be included in the "HTTP response" event without double-reading the
// network stream. Kept byte-identical; the getBody tee is order-sensitive.
private static final class LoggingHttpResponse extends HttpResponse {
private final HttpResponse actualResponse;
// Pre-populated with status/URL/headers by the response logger; doLog completes it.
private final LoggingEventBuilder logBuilder;
private final int contentLength;
private final ClientLogger logger;
private final boolean prettyPrintBody;
private final String contentTypeHeader;
private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
ClientLogger logger, int contentLength, String contentTypeHeader,
boolean prettyPrintBody) {
super(actualResponse.getRequest());
this.actualResponse = actualResponse;
this.logBuilder = logBuilder;
this.logger = logger;
this.contentLength = contentLength;
this.contentTypeHeader = contentTypeHeader;
this.prettyPrintBody = prettyPrintBody;
}
@Override
public int getStatusCode() {
return actualResponse.getStatusCode();
}
@Override
@Deprecated
public String getHeaderValue(String name) {
return actualResponse.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return actualResponse.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return actualResponse.getHeaders();
}
// Tees the body into a side buffer and logs the accumulated text when the Flux terminates.
@Override
public Flux<ByteBuffer> getBody() {
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
return Flux.using(() -> stream, s -> actualResponse.getBody()
.doOnNext(byteBuffer -> {
try {
// duplicate() preserves the original buffer position for downstream consumers.
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> doLog(s.toString(StandardCharsets.UTF_8)));
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
// Routes through getBody() so consumption still triggers the deferred log.
return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
}
@Override
public Mono<String> getBodyAsString() {
return getBodyAsByteArray().map(String::new);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BinaryData getBodyAsBinaryData() {
BinaryData content = actualResponse.getBodyAsBinaryData();
// Eager path: the whole body is already materialized, so log immediately.
doLog(content.toString());
return content;
}
@Override
public void close() {
actualResponse.close();
}
// Attaches the (optionally pretty-printed) body and emits the "HTTP response" event.
private void doLog(String body) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
.log(RESPONSE_LOG_MESSAGE);
}
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
// Shared mapper used only for pretty printing logged JSON bodies.
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
// Bodies at or above 16 KB are never logged (see shouldBodyBeLogged).
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
// Bound on the per-caller-method logger cache; exceeding it wipes the cache wholesale.
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
private final HttpLogDetailLevel httpLogDetailLevel;
// Lower-cased header / query-parameter names whose values may appear unredacted.
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
// NOTE(review): this Javadoc documents the constructor, which is not adjacent in this
// extract — verify placement against the original file.
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Fast path: logging disabled entirely, just forward the call.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    String callerMethod = (String) context.getData("caller-method").orElse("");
    ClientLogger methodLogger = getOrCreateMethodLogger(callerMethod);
    long callStartNanos = System.nanoTime();
    // Log the request, run the rest of the pipeline, then log the response with elapsed time.
    return requestLogger.logRequest(methodLogger, getRequestLoggingOptions(context))
        .then(next.process())
        .flatMap(response -> responseLogger.logResponse(methodLogger,
            getResponseLoggingOptions(response, callStartNanos, context)))
        .doOnError(throwable -> methodLogger.warning("<-- HTTP FAILED: ", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: logging disabled entirely, just forward the call.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    String callerMethod = (String) context.getData("caller-method").orElse("");
    ClientLogger methodLogger = getOrCreateMethodLogger(callerMethod);
    long callStartNanos = System.nanoTime();
    requestLogger.logRequestSync(methodLogger, getRequestLoggingOptions(context));
    try {
        HttpResponse response = next.processSync();
        if (response == null) {
            return null;
        }
        // The response logger may wrap the response so the body can be logged on consumption.
        return responseLogger.logResponseSync(methodLogger,
            getResponseLoggingOptions(response, callStartNanos, context));
    } catch (RuntimeException e) {
        methodLogger.warning("<-- HTTP FAILED: ", e);
        throw methodLogger.logExceptionAsWarning(e);
    }
}
/** Bundles the request plus per-call context metadata (retry count) for the request logger. */
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    Context callData = callContext.getContext();
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), callData, getRequestRetryCount(callData));
}

/** Bundles the response, elapsed call time, and per-call context metadata for the response logger. */
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    Context callData = callContext.getContext();
    Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    return new HttpResponseLoggingContext(httpResponse, elapsed, callData, getRequestRetryCount(callData));
}
// Default request logger: emits one structured "HTTP request" event, honoring the
// configured detail level and redaction allow lists. Kept byte-identical.
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
// Logging is synchronous; the Mono only sequences it inside a reactive pipeline.
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
// Builds and emits the event: method/URL (+try count), headers, then body or length.
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
// Headers require at least INFORMATIONAL verbosity in addition to the detail level.
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder);
}
// No body: log a zero content length and finish.
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
// logBody emits the final log message itself when the body qualifies for logging.
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
BinaryData data = request.getBodyAsBinaryData();
BinaryDataContent content = BinaryDataHelper.getContent(data);
if (content instanceof StringContent
|| content instanceof ByteBufferContent
|| content instanceof SerializableContent
|| content instanceof ByteArrayContent) {
logBody(logBuilder, logger, contentType, content.toString());
} else if (content instanceof InputStreamContent) {
byte[] contentBytes = content.toBytes();
request.setBody(contentBytes);
logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
} else {
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
.doOnNext(byteBuffer -> {
try {
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
}
}
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    // Attach the (optionally pretty-printed) body to the event and emit the request record.
    String bodyToLog = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, bodyToLog);
    logBuilder.log(REQUEST_LOG_MESSAGE);
}
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        // The logging work is performed eagerly (as before); only the return is wrapped.
        return Mono.just(logResponseInternal(logger, loggingOptions));
    }

    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        return logResponseInternal(logger, loggingOptions);
    }

    /**
     * Shared implementation for the async and sync paths (previously duplicated verbatim in both).
     * Logs status/URL/headers per the configured detail level. When the body should be logged, a
     * {@link LoggingHttpResponse} wrapper is returned so the body is logged lazily as it is
     * consumed; otherwise the log record is emitted here and the original response is returned.
     */
    private HttpResponse logResponseInternal(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Defer emitting the log record until the body is actually read.
                return new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }

    // Headers are only logged when the detail level asks for them AND the logger is at
    // INFORMATIONAL or more verbose.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
        }
    }

    // Logs status code, redacted URL, and request duration when URL logging is enabled.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }

    // Logs the raw Content-Length header string when present and non-empty.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    // No query string means nothing needs redacting.
    String rawQuery = url.getQuery();
    if (CoreUtils.isNullOrEmpty(rawQuery)) {
        return url.toString();
    }
    // Rebuild the URL, substituting the redaction placeholder for every query parameter
    // whose (lower-cased) name is not on the allowlist.
    UrlBuilder redacted = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(rawQuery).forEachRemaining(queryParam -> {
        String paramName = queryParam.getKey();
        String loggedValue = allowedQueryParameterNames.contains(paramName.toLowerCase(Locale.ROOT))
            ? queryParam.getValue()
            : REDACTED_PLACEHOLDER;
        redacted.addQueryParameter(paramName, loggedValue);
    });
    return redacted.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
*/
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder) {
    // Each header becomes a key/value on the log event; values of headers not on the
    // (lower-cased) allowlist are replaced with the redaction placeholder.
    for (HttpHeader header : headers) {
        String name = header.getName();
        boolean permitted = allowedHeaderNames.contains(name.toLowerCase(Locale.ROOT));
        String loggedValue = permitted ? header.getValue() : REDACTED_PLACEHOLDER;
        logBuilder.addKeyValue(name, loggedValue);
    }
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    // Pretty printing applies only when enabled and the content type is a JSON variant.
    if (!prettyPrintBody || contentType == null) {
        return body;
    }
    boolean isJson = contentType.startsWith(ContentType.APPLICATION_JSON)
        || contentType.startsWith("text/json");
    if (!isJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Logging must never fail the pipeline: warn and fall back to the raw body.
        logger.warning("Failed to pretty print JSON", e);
        return body;
    }
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return
*/
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    // Absent or empty header: treat as a zero-length body.
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        // Malformed header: warn and fall back to 0 rather than failing the pipeline.
        // (The former NullPointerException catch was dead code - the value is proven
        // non-null by the isNullOrEmpty check above.)
        logger.warning("Could not parse the HTTP header content-length: '{}'.", contentLengthString, e);
        return 0;
    }
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    // Log the body only for non-binary (non octet-stream) payloads that are non-empty
    // and small enough to capture (under MAX_BODY_LOG_SIZE).
    boolean isOctetStream = ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader);
    boolean hasLoggableSize = contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
    return !isOctetStream && hasLoggableSize;
}
/*
* Gets the request retry count to include in logging.
*
* If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be
* logged.
*/
private static Integer getRequestRetryCount(Context context) {
    // Null return means "don't log a retry count": either no value was set or it
    // could not be parsed as an integer.
    Object rawCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawCount == null) {
        return null;
    }
    String countText = rawCount.toString();
    try {
        return Integer.valueOf(countText);
    } catch (NumberFormatException ex) {
        LOGGER.warning("Could not parse the request retry count: '{}'.", rawCount);
        return null;
    }
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
private static ClientLogger getOrCreateMethodLogger(String methodName) {
// Intentionally crude eviction: once the cache exceeds the cap, drop everything and let
// hot entries repopulate. Bounds memory without LRU bookkeeping; not atomic with the
// computeIfAbsent below, which is acceptable for a best-effort cache.
if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
CALLER_METHOD_LOGGER_CACHE.clear();
}
return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
// Maps the computed log level to the matching fluent event builder on the logger.
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
switch (logLevel) {
case ERROR:
return logger.atError();
case WARNING:
return logger.atWarning();
case INFORMATIONAL:
return logger.atInfo();
case VERBOSE:
default:
// VERBOSE and any future/unknown level fall back to verbose logging.
return logger.atVerbose();
}
}
// Decorates an HttpResponse so the body is captured and logged as it is consumed, without
// disturbing the actual consumer. Constructed only after shouldBodyBeLogged passed, so the
// body is known to be small enough to buffer.
private static final class LoggingHttpResponse extends HttpResponse {
private final HttpResponse actualResponse;
private final LoggingEventBuilder logBuilder;
// Size hint for the capture buffer.
private final int contentLength;
private final ClientLogger logger;
private final boolean prettyPrintBody;
private final String contentTypeHeader;
private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
ClientLogger logger, int contentLength, String contentTypeHeader,
boolean prettyPrintBody) {
super(actualResponse.getRequest());
this.actualResponse = actualResponse;
this.logBuilder = logBuilder;
this.logger = logger;
this.contentLength = contentLength;
this.contentTypeHeader = contentTypeHeader;
this.prettyPrintBody = prettyPrintBody;
}
// Simple pass-throughs to the wrapped response.
@Override
public int getStatusCode() {
return actualResponse.getStatusCode();
}
@Override
@Deprecated
public String getHeaderValue(String name) {
return actualResponse.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return actualResponse.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return actualResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
// Tee each buffer into a side stream; the accumulated body is logged (as UTF-8) in the
// Flux.using cleanup callback, i.e. when the body Flux terminates.
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
return Flux.using(() -> stream, s -> actualResponse.getBody()
.doOnNext(byteBuffer -> {
try {
// duplicate() keeps the consumer's read position untouched.
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> doLog(s.toString(StandardCharsets.UTF_8)));
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
}
@Override
public Mono<String> getBodyAsString() {
// NOTE(review): String::new decodes with the platform default charset, while getBody()
// logs as UTF-8 - confirm whether charset-aware decoding was intended here.
return getBodyAsByteArray().map(String::new);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BinaryData getBodyAsBinaryData() {
// Non-reactive accessor: log immediately from the materialized content.
BinaryData content = actualResponse.getBodyAsBinaryData();
doLog(content.toString());
return content;
}
@Override
public void close() {
actualResponse.close();
}
// Attaches the (optionally pretty-printed) body and emits the response record.
private void doLog(String body) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
.log(RESPONSE_LOG_MESSAGE);
}
}
} |
we really don't want pretty-printed bodies. Structured logs are exported somewhere where pretty printing is not necessary or, when sent to file or stdout, multi-line prettiness becomes ungreppable and unparseable. | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
// No options supplied: environment-configured detail level and the library's
// default header/query-parameter allowlists.
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
// Pretty printing defaults to off when no options are given.
this.prettyPrintBody = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
// Allowlists are lower-cased once here so later membership checks are case-insensitive.
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
// Custom request/response loggers may be supplied via options; fall back to defaults.
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | this.prettyPrintBody = false; | public HttpLoggingPolicy(HttpLogOptions httpLogOptions) {
if (httpLogOptions == null) {
// No options supplied: environment-configured detail level and the library's
// default header/query-parameter allowlists.
this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL;
this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
// Pretty printing defaults to off when no options are given.
this.prettyPrintBody = false;
this.requestLogger = new DefaultHttpRequestLogger();
this.responseLogger = new DefaultHttpResponseLogger();
} else {
// Allowlists are lower-cased once here so later membership checks are case-insensitive.
this.httpLogDetailLevel = httpLogOptions.getLogLevel();
this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames()
.stream()
.map(headerName -> headerName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames()
.stream()
.map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT))
.collect(Collectors.toSet());
this.prettyPrintBody = httpLogOptions.isPrettyPrintBody();
// Custom request/response loggers may be supplied via options; fall back to defaults.
this.requestLogger = (httpLogOptions.getRequestLogger() == null)
? new DefaultHttpRequestLogger()
: httpLogOptions.getRequestLogger();
this.responseLogger = (httpLogOptions.getResponseLogger() == null)
? new DefaultHttpResponseLogger()
: httpLogOptions.getResponseLogger();
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
// Bodies of 16KB or more are never logged (see shouldBodyBeLogged).
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
// Cap on the per-caller-method logger cache before it is wholesale cleared.
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
// Immutable per-policy configuration, resolved once in the constructor.
private final HttpLogDetailLevel httpLogDetailLevel;
// Lower-cased allowlists; header/query values outside these sets are logged as REDACTED.
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
// NOTE(review): the Javadoc below describes a constructor, but no constructor immediately
// follows in this chunk - verify it is attached to the intended member.
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Logging disabled entirely: pass the call straight through.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    final String callerMethod = (String) context.getData("caller-method").orElse("");
    final ClientLogger logger = getOrCreateMethodLogger(callerMethod);
    final long startNs = System.nanoTime();
    // Log the request, run the rest of the pipeline, then log the response; failures
    // are recorded as warnings without being swallowed.
    Mono<Void> requestLogged = requestLogger.logRequest(logger, getRequestLoggingOptions(context));
    return requestLogged
        .then(next.process())
        .flatMap(response ->
            responseLogger.logResponse(logger, getResponseLoggingOptions(response, startNs, context)))
        .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Logging disabled entirely: pass the call straight through.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
    try {
        HttpResponse response = next.processSync();
        if (response != null) {
            response = responseLogger.logResponseSync(
                logger, getResponseLoggingOptions(response, startNs, context));
        }
        return response;
    } catch (RuntimeException e) {
        // Log once and rethrow, mirroring the async path's doOnError handling. The previous
        // code both logged this warning and called logger.logExceptionAsWarning(e), which
        // emitted the same failure twice.
        logger.warning("<-- HTTP FAILED: ", e);
        throw e;
    }
}
// Bundles the request, its context, and the retry count (when present) for the request logger.
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    Context azureContext = callContext.getContext();
    return new HttpRequestLoggingContext(callContext.getHttpRequest(), azureContext,
        getRequestRetryCount(azureContext));
}
// Bundles the response, elapsed duration since startNs, context, and retry count for the
// response logger.
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    Context azureContext = callContext.getContext();
    Duration elapsed = Duration.ofNanos(System.nanoTime() - startNs);
    return new HttpResponseLoggingContext(httpResponse, elapsed, azureContext,
        getRequestRetryCount(azureContext));
}
// Default request logger: emits a single structured "HTTP request" record with method, URL,
// headers, and (when small and textual) the body, per the configured detail level.
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
@Override
public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
// Logging happens eagerly at call time; the returned Mono is just a completion signal.
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
return Mono.empty();
}
@Override
public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final LogLevel logLevel = getLogLevel(loggingOptions);
if (logger.canLogAtLevel(logLevel)) {
log(logLevel, logger, loggingOptions);
}
}
// Builds and emits the request log record shared by the async and sync entry points.
private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
final HttpRequest request = loggingOptions.getHttpRequest();
LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
if (httpLogDetailLevel.shouldLogUrl()) {
logBuilder
.addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
.addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
// Retry count is optional metadata supplied via the pipeline context.
Integer retryCount = loggingOptions.getTryCount();
if (retryCount != null) {
logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
}
}
// Headers require both the detail level and at least INFORMATIONAL logger verbosity.
if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder);
}
// No body: log a zero content length and finish.
if (request.getBody() == null) {
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
.log(REQUEST_LOG_MESSAGE);
return;
}
String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
long contentLength = getContentLength(logger, request.getHeaders());
logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
// logBody emits the record itself (possibly deferred until the body is consumed).
if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
logBody(request, (int) contentLength, logBuilder, logger, contentType);
return;
}
logBuilder.log(REQUEST_LOG_MESSAGE);
}
}
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
// Logs the request body, choosing a capture strategy based on the underlying BinaryData content.
// Callers only reach here after shouldBodyBeLogged passed, so contentLength is under the log cap.
BinaryData data = request.getBodyAsBinaryData();
BinaryDataContent content = BinaryDataHelper.getContent(data);
if (content instanceof StringContent
|| content instanceof ByteBufferContent
|| content instanceof SerializableContent
|| content instanceof ByteArrayContent) {
// Already fully in memory: stringify and log immediately, request left untouched.
logBody(logBuilder, logger, contentType, content.toString());
} else if (content instanceof InputStreamContent) {
// Streams are single-shot: buffer the whole stream, then replace the request body with the
// buffered bytes so the rest of the pipeline can still read it.
byte[] contentBytes = content.toBytes();
request.setBody(contentBytes);
logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
} else {
// Reactive content: tee each emitted buffer into a side stream and log the accumulated
// body (as UTF-8) in the Flux.using cleanup callback, i.e. only once the body Flux is
// actually subscribed to and terminates.
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
.doOnNext(byteBuffer -> {
try {
// duplicate() so the downstream consumer's read position is not disturbed.
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
}
}
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    // Attach the (optionally pretty-printed) body to the event and emit the request record.
    String bodyToLog = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, bodyToLog);
    logBuilder.log(REQUEST_LOG_MESSAGE);
}
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        // The logging work is performed eagerly (as before); only the return is wrapped.
        return Mono.just(logResponseInternal(logger, loggingOptions));
    }

    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        return logResponseInternal(logger, loggingOptions);
    }

    /**
     * Shared implementation for the async and sync paths (previously duplicated verbatim in both).
     * Logs status/URL/headers per the configured detail level. When the body should be logged, a
     * {@link LoggingHttpResponse} wrapper is returned so the body is logged lazily as it is
     * consumed; otherwise the log record is emitted here and the original response is returned.
     */
    private HttpResponse logResponseInternal(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Defer emitting the log record until the body is actually read.
                return new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }

    // Headers are only logged when the detail level asks for them AND the logger is at
    // INFORMATIONAL or more verbose.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
        }
    }

    // Logs status code, redacted URL, and request duration when URL logging is enabled.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }

    // Logs the raw Content-Length header string when present and non-empty.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    // No query string means nothing needs redacting.
    String rawQuery = url.getQuery();
    if (CoreUtils.isNullOrEmpty(rawQuery)) {
        return url.toString();
    }
    // Rebuild the URL, substituting the redaction placeholder for every query parameter
    // whose (lower-cased) name is not on the allowlist.
    UrlBuilder redacted = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(rawQuery).forEachRemaining(queryParam -> {
        String paramName = queryParam.getKey();
        String loggedValue = allowedQueryParameterNames.contains(paramName.toLowerCase(Locale.ROOT))
            ? queryParam.getValue()
            : REDACTED_PLACEHOLDER;
        redacted.addQueryParameter(paramName, loggedValue);
    });
    return redacted.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
*/
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder) {
    // Each header becomes a key/value on the log event; values of headers not on the
    // (lower-cased) allowlist are replaced with the redaction placeholder.
    for (HttpHeader header : headers) {
        String name = header.getName();
        boolean permitted = allowedHeaderNames.contains(name.toLowerCase(Locale.ROOT));
        String loggedValue = permitted ? header.getValue() : REDACTED_PLACEHOLDER;
        logBuilder.addKeyValue(name, loggedValue);
    }
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    // Pretty printing applies only when enabled and the content type is a JSON variant.
    if (!prettyPrintBody || contentType == null) {
        return body;
    }
    boolean isJson = contentType.startsWith(ContentType.APPLICATION_JSON)
        || contentType.startsWith("text/json");
    if (!isJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Logging must never fail the pipeline: warn and fall back to the raw body.
        logger.warning("Failed to pretty print JSON", e);
        return body;
    }
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return
*/
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    // Absent or empty header: treat as a zero-length body.
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        // Malformed header: warn and fall back to 0 rather than failing the pipeline.
        // (The former NullPointerException catch was dead code - the value is proven
        // non-null by the isNullOrEmpty check above.)
        logger.warning("Could not parse the HTTP header content-length: '{}'.", contentLengthString, e);
        return 0;
    }
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    // Log the body only for non-binary (non octet-stream) payloads that are non-empty
    // and small enough to capture (under MAX_BODY_LOG_SIZE).
    boolean isOctetStream = ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader);
    boolean hasLoggableSize = contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
    return !isOctetStream && hasLoggableSize;
}
/*
* Gets the request retry count to include in logging.
*
* If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be
* logged.
*/
private static Integer getRequestRetryCount(Context context) {
    // Null return means "don't log a retry count": either no value was set or it
    // could not be parsed as an integer.
    Object rawCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawCount == null) {
        return null;
    }
    String countText = rawCount.toString();
    try {
        return Integer.valueOf(countText);
    } catch (NumberFormatException ex) {
        LOGGER.warning("Could not parse the request retry count: '{}'.", rawCount);
        return null;
    }
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
private static ClientLogger getOrCreateMethodLogger(String methodName) {
// Intentionally crude eviction: once the cache exceeds the cap, drop everything and let
// hot entries repopulate. Bounds memory without LRU bookkeeping; not atomic with the
// computeIfAbsent below, which is acceptable for a best-effort cache.
if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
CALLER_METHOD_LOGGER_CACHE.clear();
}
return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
// Maps the computed log level to the matching fluent event builder on the logger.
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
switch (logLevel) {
case ERROR:
return logger.atError();
case WARNING:
return logger.atWarning();
case INFORMATIONAL:
return logger.atInfo();
case VERBOSE:
default:
// VERBOSE and any future/unknown level fall back to verbose logging.
return logger.atVerbose();
}
}
// Decorates an HttpResponse so the body is captured and logged as it is consumed, without
// disturbing the actual consumer. Constructed only after shouldBodyBeLogged passed, so the
// body is known to be small enough to buffer.
private static final class LoggingHttpResponse extends HttpResponse {
private final HttpResponse actualResponse;
private final LoggingEventBuilder logBuilder;
// Size hint for the capture buffer.
private final int contentLength;
private final ClientLogger logger;
private final boolean prettyPrintBody;
private final String contentTypeHeader;
private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
ClientLogger logger, int contentLength, String contentTypeHeader,
boolean prettyPrintBody) {
super(actualResponse.getRequest());
this.actualResponse = actualResponse;
this.logBuilder = logBuilder;
this.logger = logger;
this.contentLength = contentLength;
this.contentTypeHeader = contentTypeHeader;
this.prettyPrintBody = prettyPrintBody;
}
// Simple pass-throughs to the wrapped response.
@Override
public int getStatusCode() {
return actualResponse.getStatusCode();
}
@Override
@Deprecated
public String getHeaderValue(String name) {
return actualResponse.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return actualResponse.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return actualResponse.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
// Tee each buffer into a side stream; the accumulated body is logged (as UTF-8) in the
// Flux.using cleanup callback, i.e. when the body Flux terminates.
AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
return Flux.using(() -> stream, s -> actualResponse.getBody()
.doOnNext(byteBuffer -> {
try {
// duplicate() keeps the consumer's read position untouched.
ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}), s -> doLog(s.toString(StandardCharsets.UTF_8)));
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
}
@Override
public Mono<String> getBodyAsString() {
// NOTE(review): String::new decodes with the platform default charset, while getBody()
// logs as UTF-8 - confirm whether charset-aware decoding was intended here.
return getBodyAsByteArray().map(String::new);
}
@Override
public Mono<String> getBodyAsString(Charset charset) {
return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
}
@Override
public BinaryData getBodyAsBinaryData() {
// Non-reactive accessor: log immediately from the materialized content.
BinaryData content = actualResponse.getBodyAsBinaryData();
doLog(content.toString());
return content;
}
@Override
public void close() {
actualResponse.close();
}
// Attaches the (optionally pretty-printed) body and emits the response record.
private void doLog(String body) {
logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
.log(RESPONSE_LOG_MESSAGE);
}
}
} | class HttpLoggingPolicy implements HttpPipelinePolicy {
private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper();
// Bodies of 16KB or more are never logged (see shouldBodyBeLogged).
private static final int MAX_BODY_LOG_SIZE = 1024 * 16;
private static final String REDACTED_PLACEHOLDER = "REDACTED";
// Cap on the per-caller-method logger cache before it is wholesale cleared.
private static final int LOGGER_CACHE_MAX_SIZE = 1000;
private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>();
private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class);
// Immutable per-policy configuration, resolved once in the constructor.
private final HttpLogDetailLevel httpLogDetailLevel;
// Lower-cased allowlists; header/query values outside these sets are logged as REDACTED.
private final Set<String> allowedHeaderNames;
private final Set<String> allowedQueryParameterNames;
private final boolean prettyPrintBody;
private final HttpRequestLogger requestLogger;
private final HttpResponseLogger responseLogger;
/**
* Key for {@link Context} to pass request retry count metadata for logging.
*/
public static final String RETRY_COUNT_CONTEXT = "requestRetryCount";
private static final String REQUEST_LOG_MESSAGE = "HTTP request";
private static final String RESPONSE_LOG_MESSAGE = "HTTP response";
// NOTE(review): the Javadoc below describes a constructor, but no constructor immediately
// follows in this chunk - verify it is attached to the intended member.
/**
* Creates an HttpLoggingPolicy with the given log configurations.
*
* @param httpLogOptions The HTTP logging configuration options.
*/
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Logging disabled entirely: pass the call straight through.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.process();
    }
    final String callerMethod = (String) context.getData("caller-method").orElse("");
    final ClientLogger logger = getOrCreateMethodLogger(callerMethod);
    final long startNs = System.nanoTime();
    // Log the request, run the rest of the pipeline, then log the response; failures
    // are recorded as warnings without being swallowed.
    Mono<Void> requestLogged = requestLogger.logRequest(logger, getRequestLoggingOptions(context));
    return requestLogged
        .then(next.process())
        .flatMap(response ->
            responseLogger.logResponse(logger, getResponseLoggingOptions(response, startNs, context)))
        .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable));
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    // Fast path: when logging is fully disabled, forward the call with zero overhead.
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }
    final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();
    requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
    try {
        HttpResponse response = next.processSync();
        if (response != null) {
            // Response logging may return a wrapper (see LoggingHttpResponse) that logs
            // the body as it is consumed.
            response = responseLogger.logResponseSync(
                logger, getResponseLoggingOptions(response, startNs, context));
        }
        return response;
    } catch (RuntimeException e) {
        // NOTE(review): the failure is logged here and again by logExceptionAsWarning,
        // which appears to emit two warning records for a single failure — confirm intended.
        logger.warning("<-- HTTP FAILED: ", e);
        throw logger.logExceptionAsWarning(e);
    }
}
// Bundles the request, its Context, and the current retry count into the logging context.
private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) {
    return new HttpRequestLoggingContext(callContext.getHttpRequest(),
        callContext.getContext(),
        getRequestRetryCount(callContext.getContext()));
}
// Bundles the response, the elapsed time since startNs, the Context, and the retry count.
private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs,
    HttpPipelineCallContext callContext) {
    return new HttpResponseLoggingContext(httpResponse, Duration.ofNanos(System.nanoTime() - startNs),
        callContext.getContext(),
        getRequestRetryCount(callContext.getContext()));
}
/**
 * Default {@link HttpRequestLogger}: emits one structured log record per request containing
 * (depending on the configured detail level) method/URL, try count, headers, and body.
 */
private final class DefaultHttpRequestLogger implements HttpRequestLogger {
    @Override
    public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
        return Mono.empty();
    }
    @Override
    public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        // Same as logRequest without the reactive wrapper.
        final LogLevel logLevel = getLogLevel(loggingOptions);
        if (logger.canLogAtLevel(logLevel)) {
            log(logLevel, logger, loggingOptions);
        }
    }
    // Builds and emits the request log record; delegates body logging to logBody, which
    // is then responsible for emitting the record itself.
    private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) {
        final HttpRequest request = loggingOptions.getHttpRequest();
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames));
            Integer retryCount = loggingOptions.getTryCount();
            if (retryCount != null) {
                logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount);
            }
        }
        // Headers additionally require INFORMATIONAL logging to be enabled.
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder);
        }
        if (request.getBody() == null) {
            // No body: log content-length 0 and finish here.
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0)
                .log(REQUEST_LOG_MESSAGE);
            return;
        }
        String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, request.getHeaders());
        logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
        if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) {
            // logBody emits the log record once the body content is available.
            logBody(request, (int) contentLength, logBuilder, logger, contentType);
            return;
        }
        logBuilder.log(REQUEST_LOG_MESSAGE);
    }
}
/*
 * Logs the request body, choosing a strategy based on how the body is held:
 *  - in-memory content (String/ByteBuffer/serializable/byte[]) is logged directly;
 *  - InputStream content is fully buffered, the request body is replaced with the buffered
 *    bytes (so the request stays replayable), and the buffer is logged;
 *  - any other content (e.g. a streaming flux) is tapped: bytes are copied into a side
 *    stream as they flow and the log record is emitted when the flux completes.
 */
private void logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) {
    BinaryData data = request.getBodyAsBinaryData();
    BinaryDataContent content = BinaryDataHelper.getContent(data);
    if (content instanceof StringContent
        || content instanceof ByteBufferContent
        || content instanceof SerializableContent
        || content instanceof ByteArrayContent) {
        logBody(logBuilder, logger, contentType, content.toString());
    } else if (content instanceof InputStreamContent) {
        // Buffer the stream so it can both be logged and re-sent.
        byte[] contentBytes = content.toBytes();
        request.setBody(contentBytes);
        logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8));
    } else {
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer()
            .doOnNext(byteBuffer -> {
                try {
                    // duplicate() keeps the original buffer's position intact for the network write.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8))));
    }
}
/**
 * Adds the (possibly pretty-printed) body to the log record and emits the request message.
 */
private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) {
    String bodyToLog = prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data);
    logBuilder.addKeyValue(LoggingKeys.BODY_KEY, bodyToLog).log(REQUEST_LOG_MESSAGE);
}
/**
 * Default {@link HttpResponseLogger}: emits one structured log record per response with
 * status/URL/duration, headers, and (when enabled and eligible) the body. When the body is
 * to be logged, the response is wrapped in {@link LoggingHttpResponse} so the record is
 * emitted only once the body has actually been consumed.
 */
private final class DefaultHttpResponseLogger implements HttpResponseLogger {
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return Mono.just(response);
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Defer logging until the body is read; the wrapper emits the record.
                return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody));
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return Mono.just(response);
    }
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        // Headers additionally require INFORMATIONAL logging to be enabled.
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
        }
    }
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }
    // Logs the raw Content-Length header value when present and non-empty.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }
    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        // Synchronous mirror of logResponse.
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                return new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }
}
/*
* Generates the redacted URL for logging.
*
* @param url URL where the request is being sent.
* @return A URL with query parameters redacted based on configurations in this policy.
*/
private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) {
    String query = url.getQuery();
    if (CoreUtils.isNullOrEmpty(query)) {
        // No query string means nothing to redact.
        return url.toString();
    }
    UrlBuilder redactedUrl = ImplUtils.parseUrl(url, false);
    CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> {
        String name = queryParam.getKey();
        boolean allowed = allowedQueryParameterNames.contains(name.toLowerCase(Locale.ROOT));
        redactedUrl.addQueryParameter(name, allowed ? queryParam.getValue() : REDACTED_PLACEHOLDER);
    });
    return redactedUrl.toString();
}
/*
* Adds HTTP headers into the StringBuilder that is generating the log message.
*
* @param headers HTTP headers on the request or response.
* @param sb StringBuilder that is generating the log message.
* @param logLevel Log level the environment is configured to use.
*/
private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers,
    LoggingEventBuilder logBuilder) {
    for (HttpHeader header : headers) {
        String headerName = header.getName();
        // Only explicitly-allowed header names expose their value; everything else is redacted.
        String loggedValue = REDACTED_PLACEHOLDER;
        if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) {
            loggedValue = header.getValue();
        }
        logBuilder.addKeyValue(headerName, loggedValue);
    }
}
/*
* Determines and attempts to pretty print the body if it is JSON.
*
* <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p>
*
* @param logger Logger used to log a warning if the body fails to pretty print as JSON.
* @param contentType Content-Type header.
* @param body Body of the request or response.
* @return The body pretty printed if it is JSON, otherwise the unmodified body.
*/
private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType,
    String body) {
    // Pretty-printing only applies to JSON payloads when explicitly enabled.
    boolean isJson = contentType != null
        && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"));
    if (!prettyPrintBody || !isJson) {
        return body;
    }
    try {
        return PRETTY_PRINTER.writeValueAsString(PRETTY_PRINTER.readTree(body));
    } catch (Exception e) {
        // Malformed JSON: fall back to the raw body.
        logger.warning("Failed to pretty print JSON", e);
        return body;
    }
}
/*
* Attempts to retrieve and parse the Content-Length header into a numeric representation.
*
* @param logger Logger used to log a warning if the Content-Length header is an invalid number.
* @param headers HTTP headers that are checked for containing Content-Length.
* @return
*/
/*
 * Attempts to retrieve and parse the Content-Length header into a numeric representation.
 * Returns 0 when the header is absent, empty, or unparseable (the failure is logged).
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(contentLengthString)) {
        // Missing or empty header: treat as a zero-length body.
        return 0;
    }
    try {
        return Long.parseLong(contentLengthString);
    } catch (NumberFormatException e) {
        // contentLengthString is guaranteed non-null/non-empty here, so NumberFormatException
        // is the only possible parse failure; the previous NullPointerException catch was dead.
        logger.warning("Could not parse the HTTP header content-length: '{}'.", contentLengthString, e);
        return 0;
    }
}
/*
* Determines if the request or response body should be logged.
*
* <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body
* isn't empty and is less than 16KB in size.</p>
*
* @param contentTypeHeader Content-Type header value.
* @param contentLength Content-Length header represented as a numeric.
* @return A flag indicating if the request or response body should be logged.
*/
private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
    // Octet-stream payloads are opaque binary; empty or >=16KB bodies are skipped as well.
    boolean isOctetStream = ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader);
    boolean hasLoggableSize = contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE;
    return !isOctetStream && hasLoggableSize;
}
/*
* Gets the request retry count to include in logging.
*
* If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be
* logged.
*/
private static Integer getRequestRetryCount(Context context) {
    Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null);
    if (rawRetryCount != null) {
        try {
            return Integer.valueOf(rawRetryCount.toString());
        } catch (NumberFormatException ex) {
            // Unparseable value: warn and fall through to "don't log a retry count".
            LOGGER.warning("Could not parse the request retry count: '{}'.", rawRetryCount);
        }
    }
    return null;
}
/*
* Get or create the ClientLogger for the method having its request and response logged.
*/
private static ClientLogger getOrCreateMethodLogger(String methodName) {
    // Crude bound on cache growth: drop the whole cache once it exceeds the limit.
    // NOTE(review): clear() is not synchronized with computeIfAbsent, so concurrent callers
    // may repopulate while clearing — appears to be an accepted trade-off; confirm.
    if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) {
        CALLER_METHOD_LOGGER_CACHE.clear();
    }
    return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new);
}
// Maps a LogLevel onto the matching LoggingEventBuilder of the supplied logger.
private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) {
    if (logLevel == LogLevel.ERROR) {
        return logger.atError();
    }
    if (logLevel == LogLevel.WARNING) {
        return logger.atWarning();
    }
    if (logLevel == LogLevel.INFORMATIONAL) {
        return logger.atInfo();
    }
    // VERBOSE and any other level fall back to verbose logging.
    return logger.atVerbose();
}
/**
 * HttpResponse decorator that logs the response body as it is consumed and then emits the
 * buffered response log record. All metadata accessors delegate to the wrapped response.
 */
private static final class LoggingHttpResponse extends HttpResponse {
    private final HttpResponse actualResponse;
    private final LoggingEventBuilder logBuilder;
    private final int contentLength;
    private final ClientLogger logger;
    private final boolean prettyPrintBody;
    private final String contentTypeHeader;
    private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder,
        ClientLogger logger, int contentLength, String contentTypeHeader,
        boolean prettyPrintBody) {
        super(actualResponse.getRequest());
        this.actualResponse = actualResponse;
        this.logBuilder = logBuilder;
        this.logger = logger;
        this.contentLength = contentLength;
        this.contentTypeHeader = contentTypeHeader;
        this.prettyPrintBody = prettyPrintBody;
    }
    @Override
    public int getStatusCode() {
        return actualResponse.getStatusCode();
    }
    @Override
    @Deprecated
    public String getHeaderValue(String name) {
        return actualResponse.getHeaderValue(name);
    }
    @Override
    public String getHeaderValue(HttpHeaderName headerName) {
        return actualResponse.getHeaderValue(headerName);
    }
    @Override
    public HttpHeaders getHeaders() {
        return actualResponse.getHeaders();
    }
    @Override
    public Flux<ByteBuffer> getBody() {
        // Copy each buffer into a side stream while passing it through unchanged; log the
        // collected bytes once the body flux terminates.
        AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength);
        return Flux.using(() -> stream, s -> actualResponse.getBody()
            .doOnNext(byteBuffer -> {
                try {
                    // duplicate() keeps the downstream consumer's buffer position intact.
                    ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s);
                } catch (IOException ex) {
                    throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
                }
            }), s -> doLog(s.toString(StandardCharsets.UTF_8)));
    }
    @Override
    public Mono<byte[]> getBodyAsByteArray() {
        return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders());
    }
    @Override
    public Mono<String> getBodyAsString() {
        // NOTE(review): String::new decodes with the platform default charset; the charset
        // overload below suggests an explicit charset may be intended here — confirm.
        return getBodyAsByteArray().map(String::new);
    }
    @Override
    public Mono<String> getBodyAsString(Charset charset) {
        return getBodyAsByteArray().map(bytes -> new String(bytes, charset));
    }
    @Override
    public BinaryData getBodyAsBinaryData() {
        BinaryData content = actualResponse.getBodyAsBinaryData();
        doLog(content.toString());
        return content;
    }
    @Override
    public void close() {
        actualResponse.close();
    }
    // Emits the response log record with the (optionally pretty-printed) body attached.
    private void doLog(String body) {
        logBuilder.addKeyValue(LoggingKeys.BODY_KEY,
            prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body))
            .log(RESPONSE_LOG_MESSAGE);
    }
}
} |
I don't think we actually need to do an upload here, we just care about the assert on 2930. | public void getNonEncodedBlobName(String originalBlobName) {
BlobClient client = cc.getBlobClient(originalBlobName);
BlockBlobClient blockBlobClient = cc.getBlobClient(client.getBlobName()).getBlockBlobClient();
assertEquals(blockBlobClient.getBlobName(), originalBlobName);
blockBlobClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSize());
String encodedName = Utility.urlEncode(originalBlobName);
assertTrue(cc.getBlobClient(originalBlobName).getBlobUrl().contains(encodedName));
} | blockBlobClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); | public void getNonEncodedBlobName(String originalBlobName) {
BlobClient client = cc.getBlobClient(originalBlobName);
BlockBlobClient blockBlobClient = cc.getBlobClient(client.getBlobName()).getBlockBlobClient();
assertEquals(blockBlobClient.getBlobName(), originalBlobName);
String encodedName = Utility.urlEncode(originalBlobName);
assertTrue(cc.getBlobClient(originalBlobName).getBlobUrl().contains(encodedName));
} | class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
// Records each reported progress value so the sequence can be asserted on later.
@Override
public void handleProgress(long progress) {
    progresses.add(progress);
}
} | class MockProgressListener implements ProgressListener {
List<Long> progresses = new ArrayList<>();
@Override
public void handleProgress(long progress) {
progresses.add(progress);
}
} |
Any reason there isn't a `syncNoFaultContainerClient` as well? | public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) {
super(options);
this.telemetryHelper = telemetryHelper;
String connectionString = options.getConnectionString();
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder()
.connectionString(connectionString)
.httpLogOptions(getLogOptions());
asyncNoFaultClient = clientBuilder.buildAsyncClient();
if (options.isFaultInjectionEnabled()) {
clientBuilder.httpClient(new HttpFaultInjectingHttpClient(
HttpClient.createDefault(), false, getFaultProbabilities()));
}
syncClient = clientBuilder.buildClient();
asyncClient = clientBuilder.buildAsyncClient();
asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME);
syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME);
asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME);
} | asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME); | public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) {
super(options);
this.telemetryHelper = telemetryHelper;
String connectionString = options.getConnectionString();
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder()
.connectionString(connectionString)
.httpLogOptions(getLogOptions());
asyncNoFaultClient = clientBuilder.buildAsyncClient();
if (options.isFaultInjectionEnabled()) {
clientBuilder.httpClient(new HttpFaultInjectingHttpClient(
HttpClient.createDefault(), false, getFaultProbabilities()));
}
syncClient = clientBuilder.buildClient();
asyncClient = clientBuilder.buildAsyncClient();
asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME);
syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME);
asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME);
} | class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> {
private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID();
private final BlobServiceClient syncClient;
private final BlobServiceAsyncClient asyncClient;
private final BlobServiceAsyncClient asyncNoFaultClient;
private final BlobContainerClient syncContainerClient;
private final BlobContainerAsyncClient asyncContainerClient;
private final BlobContainerAsyncClient asyncNoFaultContainerClient;
private final TelemetryHelper telemetryHelper;
@Override
public Mono<Void> globalSetupAsync() {
    // Record run parameters once, then make sure the shared container exists.
    telemetryHelper.logStart(options);
    return super.globalSetupAsync()
        .then(asyncNoFaultContainerClient.createIfNotExists())
        .then();
}
@Override
public Mono<Void> globalCleanupAsync() {
    // Log completion and remove the shared container; uses the no-fault client so cleanup
    // is not subject to fault injection.
    telemetryHelper.logEnd();
    return asyncNoFaultContainerClient.deleteIfExists()
        .then(super.globalCleanupAsync());
}
@SuppressWarnings("try")
@Override
public void run() {
    // One sync scenario iteration wrapped in a tracing span; the boolean result maps onto
    // the success/mismatch counters, exceptions onto cancellation/failure.
    Context span = telemetryHelper.getTracer().start("run", Context.NONE);
    try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
        if (runInternal(span)) {
            telemetryHelper.trackSuccess(span);
        } else {
            telemetryHelper.trackMismatch(span);
        }
    } catch (Exception e) {
        // Check the exception type before inspecting the message: getMessage() may be null
        // (the original order dereferenced it unconditionally and could throw NPE here).
        if (e instanceof InterruptedException || e instanceof TimeoutException
            || (e.getMessage() != null && e.getMessage().contains("Timeout on blocking read"))) {
            telemetryHelper.trackCancellation(span);
        } else {
            telemetryHelper.trackFailure(span, e);
        }
    }
}
@SuppressWarnings("try")
@Override
public Mono<Void> runAsync() {
    // Async counterpart of run(): one scenario iteration in a tracing span, with the outcome
    // mapped onto success/mismatch/cancellation/failure counters.
    Context span = telemetryHelper.getTracer().start("runAsync", Context.NONE);
    try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
        return runInternalAsync(span)
            .doOnCancel(() -> telemetryHelper.trackCancellation(span))
            .doOnError(e -> telemetryHelper.trackFailure(span, e))
            .doOnNext(match -> {
                if (match) {
                    telemetryHelper.trackSuccess(span);
                } else {
                    telemetryHelper.trackMismatch(span);
                }
            })
            .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span))
            .then()
            // Errors were already tracked above; swallow them so the harness keeps iterating.
            .onErrorResume(e -> Mono.empty());
    } catch (Exception e) {
        // Setup failed before the pipeline was built.
        // NOTE(review): the exception is silently dropped without being tracked — confirm intended.
        return Mono.empty();
    }
}
// One scenario iteration (sync / async). Implementations return true when the result matched
// expectations and false on a data mismatch (consumed by run()/runAsync()).
protected abstract boolean runInternal(Context context);
protected abstract Mono<Boolean> runInternalAsync(Context context);
// Container client accessors; the "NoFault" variant bypasses fault injection.
protected BlobContainerClient getSyncContainerClient() {
    return syncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClient() {
    return asyncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClientNoFault() {
    return asyncNoFaultContainerClient;
}
// Header-level HTTP logging, explicitly allowing the fault-injector and range/MD5 headers
// that are needed to diagnose injected failures.
private static HttpLogOptions getLogOptions() {
    return new HttpLogOptions()
        .setLogLevel(HttpLogDetailLevel.HEADERS)
        .addAllowedHeaderName("x-ms-faultinjector-response-option")
        .addAllowedHeaderName("Content-Range")
        .addAllowedHeaderName("Accept-Ranges")
        .addAllowedHeaderName("x-ms-blob-content-md5")
        .addAllowedHeaderName("x-ms-error-code")
        .addAllowedHeaderName("x-ms-range");
}
// Fault-injection mix: rare full-response drops (~1% combined) and a heavier share of
// partial-response faults (6% each variant).
private static FaultInjectionProbabilities getFaultProbabilities() {
    return new FaultInjectionProbabilities()
        .setNoResponseIndefinite(0.003D)
        .setNoResponseClose(0.004D)
        .setNoResponseAbort(0.003D)
        .setPartialResponseIndefinite(0.06)
        .setPartialResponseClose(0.06)
        .setPartialResponseAbort(0.06)
        .setPartialResponseFinishNormal(0.06);
}
} | class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> {
private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID();
private final BlobServiceClient syncClient;
private final BlobServiceAsyncClient asyncClient;
private final BlobServiceAsyncClient asyncNoFaultClient;
private final BlobContainerClient syncContainerClient;
private final BlobContainerAsyncClient asyncContainerClient;
private final BlobContainerAsyncClient asyncNoFaultContainerClient;
private final TelemetryHelper telemetryHelper;
@Override
public Mono<Void> globalSetupAsync() {
telemetryHelper.logStart(options);
return super.globalSetupAsync()
.then(asyncNoFaultContainerClient.createIfNotExists())
.then();
}
@Override
public Mono<Void> globalCleanupAsync() {
telemetryHelper.logEnd();
return asyncNoFaultContainerClient.deleteIfExists()
.then(super.globalCleanupAsync());
}
@SuppressWarnings("try")
@Override
public void run() {
    // One sync scenario iteration wrapped in a tracing span; the boolean result maps onto
    // the success/mismatch counters, exceptions onto cancellation/failure.
    Context span = telemetryHelper.getTracer().start("run", Context.NONE);
    try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
        if (runInternal(span)) {
            telemetryHelper.trackSuccess(span);
        } else {
            telemetryHelper.trackMismatch(span);
        }
    } catch (Exception e) {
        // Check the exception type before inspecting the message: getMessage() may be null
        // (the original order dereferenced it unconditionally and could throw NPE here).
        if (e instanceof InterruptedException || e instanceof TimeoutException
            || (e.getMessage() != null && e.getMessage().contains("Timeout on blocking read"))) {
            telemetryHelper.trackCancellation(span);
        } else {
            telemetryHelper.trackFailure(span, e);
        }
    }
}
@SuppressWarnings("try")
@Override
public Mono<Void> runAsync() {
Context span = telemetryHelper.getTracer().start("runAsync", Context.NONE);
try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
return runInternalAsync(span)
.doOnCancel(() -> telemetryHelper.trackCancellation(span))
.doOnError(e -> telemetryHelper.trackFailure(span, e))
.doOnNext(match -> {
if (match) {
telemetryHelper.trackSuccess(span);
} else {
telemetryHelper.trackMismatch(span);
}
})
.contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span))
.then()
.onErrorResume(e -> Mono.empty());
} catch (Exception e) {
return Mono.empty();
}
}
protected abstract boolean runInternal(Context context);
protected abstract Mono<Boolean> runInternalAsync(Context context);
protected BlobContainerClient getSyncContainerClient() {
return syncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClient() {
return asyncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClientNoFault() {
return asyncNoFaultContainerClient;
}
private static HttpLogOptions getLogOptions() {
return new HttpLogOptions()
.setLogLevel(HttpLogDetailLevel.HEADERS)
.addAllowedHeaderName("x-ms-faultinjector-response-option")
.addAllowedHeaderName("Content-Range")
.addAllowedHeaderName("Accept-Ranges")
.addAllowedHeaderName("x-ms-blob-content-md5")
.addAllowedHeaderName("x-ms-error-code")
.addAllowedHeaderName("x-ms-range");
}
private static FaultInjectionProbabilities getFaultProbabilities() {
return new FaultInjectionProbabilities()
.setNoResponseIndefinite(0.003D)
.setNoResponseClose(0.004D)
.setNoResponseAbort(0.003D)
.setPartialResponseIndefinite(0.06)
.setPartialResponseClose(0.06)
.setPartialResponseAbort(0.06)
.setPartialResponseFinishNormal(0.06);
}
} |
nit: let's use `Mono.using` here as it's better for operations with resource cleanup. ```suggestion return Mono.using(() -> downloadPath, ignored -> asyncClient.downloadToFileWithResponse(blobOptions), path -> deleteFile(path)); ``` | protected Mono<Boolean> runInternalAsync(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
return asyncClient
.downloadToFileWithResponse(blobOptions)
.flatMap(ignored -> validateDownloadedContentsAsync(downloadPath, span))
.doFinally(i -> deleteFile(downloadPath));
} | .doFinally(i -> deleteFile(downloadPath)); | protected Mono<Boolean> runInternalAsync(Context span) {
return Mono.using(
() -> directoryPath.resolve(UUID.randomUUID() + ".txt"),
path -> asyncClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(path.toString()))
.flatMap(ignored -> ORIGINAL_CONTENT.checkMatch(path, span)),
path -> deleteFile(path));
} | class DownloadToFile extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(DownloadToFile.class);
private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadToFile.class.getName());
// Directory holding the per-iteration download files.
private final Path directoryPath;
// Number of leading bytes captured during validation (capped at 1024); passed to
// checkMatch — presumably for mismatch diagnostics; confirm against OriginalContent.
private final int blobPrintableSize;
// Reference content/CRC that downloads are validated against.
private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncClient;
private final BlobAsyncClient asyncNoFaultClient;
public DownloadToFile(StorageStressOptions options) {
    super(options, TELEMETRY_HELPER);
    // Temp directory for downloaded files; created eagerly so every iteration can use it.
    this.directoryPath = getTempPath("test");
    // Capture at most 1KB of the blob head for validation/diagnostics.
    this.blobPrintableSize = (int) Math.min(options.getSize(), 1024);
    this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName());
    this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName());
    this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName());
}
@Override
protected boolean runInternal(Context span) {
    // Download to a unique temp file, validate against the original content, and always
    // delete the file afterwards.
    Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
    BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
    try {
        syncClient.downloadToFileWithResponse(blobOptions, Duration.ofSeconds(options.getDuration()), span);
        return validateDownloadedContents(downloadPath);
    } finally {
        deleteFile(downloadPath);
    }
}
// Best-effort cleanup of a downloaded temp file; failures are logged, never propagated.
// Fix: the original carried an @Override annotation, which is invalid on a private static
// method (there is nothing to override) and fails compilation.
private static void deleteFile(Path path) {
    try {
        path.toFile().delete();
    } catch (Exception e) {
        LOGGER.atInfo()
            .addKeyValue("path", path)
            .log("failed to delete file", e);
    }
}
// Streams the downloaded file once, accumulating a CRC32, the total length, and the first
// blobPrintableSize bytes, then delegates the comparison to ORIGINAL_CONTENT.
private boolean validateDownloadedContents(Path downloadPath) {
    long length = 0;
    ByteBuffer contentHead = ByteBuffer.allocate(blobPrintableSize);
    CRC32 dataCrc = new CRC32();
    try (InputStream file = Files.newInputStream(downloadPath)) {
        // 4MB read buffer.
        byte[] buf = new byte[4 * 1024 * 1024];
        int read;
        while ((read = file.read(buf)) != -1) {
            dataCrc.update(buf, 0, read);
            if (contentHead.hasRemaining()) {
                contentHead.put(buf, 0, Math.min(read, contentHead.remaining()));
            }
            length += read;
        }
    }
    catch (IOException e) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
    return ORIGINAL_CONTENT.checkMatch(dataCrc, length, contentHead);
}
// Async validation: folds the file's bytes into a CRC32 and total length while capturing
// the first blobPrintableSize bytes, then compares against ORIGINAL_CONTENT inside the span.
private Mono<Boolean> validateDownloadedContentsAsync(Path downloadPath, Context span) {
    CRC32 dataCrc = new CRC32();
    ByteBuffer contentHead = ByteBuffer.allocate(blobPrintableSize);
    return BinaryData.fromFile(downloadPath).toFluxByteBuffer()
        .map(bb -> {
            // Size must be read before CRC32.update consumes the buffer.
            long length = bb.remaining();
            dataCrc.update(bb);
            if (contentHead.hasRemaining()) {
                // flip() rewinds the consumed buffer so its bytes can be re-read into the head.
                bb.flip();
                while (contentHead.hasRemaining() && bb.hasRemaining()) {
                    contentHead.put(bb.get());
                }
            }
            return length;
        })
        .reduce(0L, Long::sum)
        .map(l -> {
            // Make the tracing span current while reporting the comparison result.
            try(AutoCloseable scope = TELEMETRY_HELPER.getTracer().makeSpanCurrent(span)) {
                return ORIGINAL_CONTENT.checkMatch(dataCrc, l, contentHead);
            } catch (Exception e) {
                throw LOGGER.logExceptionAsError(new RuntimeException(e));
            }
        });
}
@Override
public Mono<Void> globalSetupAsync() {
    // Seed the shared blob (via the no-fault client) that every iteration downloads.
    return super.globalSetupAsync()
        .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize(), blobPrintableSize));
}
@Override
public Mono<Void> globalCleanupAsync() {
    // Remove the shared blob before the base class tears down the container.
    return asyncNoFaultClient.delete()
        .then(super.globalCleanupAsync());
}
// Creates a unique temp directory for downloads; IOException is rethrown unchecked.
private Path getTempPath(String prefix) {
    try {
        return Files.createTempDirectory(prefix);
    } catch (IOException e) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
    }
}
} | class DownloadToFile extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(DownloadToFile.class);
private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadToFile.class);
private final Path directoryPath;
private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncClient;
private final BlobAsyncClient asyncNoFaultClient;
public DownloadToFile(StorageStressOptions options) {
super(options, TELEMETRY_HELPER);
this.directoryPath = getTempPath("test");
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName());
this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName());
this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName());
}
@Override
protected boolean runInternal(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
try {
syncClient.downloadToFileWithResponse(blobOptions, Duration.ofSeconds(options.getDuration()), span);
return ORIGINAL_CONTENT.checkMatch(downloadPath, span).block();
} finally {
deleteFile(downloadPath);
}
}
// Best-effort cleanup of a downloaded temp file; failures are logged, never propagated.
// Fix: the original carried an @Override annotation, which is invalid on a private static
// method (there is nothing to override) and fails compilation.
private static void deleteFile(Path path) {
    try {
        path.toFile().delete();
    } catch (Exception e) {
        LOGGER.atInfo()
            .addKeyValue("path", path)
            .log("failed to delete file", e);
    }
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize()));
}
@Override
public Mono<Void> globalCleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.globalCleanupAsync());
}
private Path getTempPath(String prefix) {
try {
return Files.createTempDirectory(prefix);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
} |
`Math.min(options.getSize(), 1024)` is `long` and is guaranteed to be less than `1024`, so no overflow should be expected. Currently first 1kb of content is only printed when mismatch is detected (which never happens thanks to your recent fixes). Mismatched content is a severe issue and if it happens we probably need to go and fix it asap anyway. Note, with stress tests we're not really testing perf, but perf in the presence of network/other issues, so micro-optimization is not necessary. | public DownloadToFile(StorageStressOptions options) {
super(options, TELEMETRY_HELPER);
this.directoryPath = getTempPath("test");
this.blobPrintableSize = (int) Math.min(options.getSize(), 1024);
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName());
this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName());
this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName());
} | this.blobPrintableSize = (int) Math.min(options.getSize(), 1024); | public DownloadToFile(StorageStressOptions options) {
super(options, TELEMETRY_HELPER);
this.directoryPath = getTempPath("test");
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName());
this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName());
this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName());
} | class DownloadToFile extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(DownloadToFile.class);
private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadToFile.class.getName());
private final Path directoryPath;
private final int blobPrintableSize;
private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncClient;
private final BlobAsyncClient asyncNoFaultClient;
@Override
protected boolean runInternal(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
try {
syncClient.downloadToFileWithResponse(blobOptions, Duration.ofSeconds(options.getDuration()), span);
return validateDownloadedContents(downloadPath);
} finally {
deleteFile(downloadPath);
}
}
@Override
protected Mono<Boolean> runInternalAsync(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
return asyncClient
.downloadToFileWithResponse(blobOptions)
.flatMap(ignored -> validateDownloadedContentsAsync(downloadPath, span))
.doFinally(i -> deleteFile(downloadPath));
}
private static void deleteFile(Path path) {
try {
path.toFile().delete();
} catch (Exception e) {
LOGGER.atInfo()
.addKeyValue("path", path)
.log("failed to delete file", e);
}
}
private boolean validateDownloadedContents(Path downloadPath) {
long length = 0;
ByteBuffer contentHead = ByteBuffer.allocate(blobPrintableSize);
CRC32 dataCrc = new CRC32();
try (InputStream file = Files.newInputStream(downloadPath)) {
byte[] buf = new byte[4 * 1024 * 1024];
int read;
while ((read = file.read(buf)) != -1) {
dataCrc.update(buf, 0, read);
if (contentHead.hasRemaining()) {
contentHead.put(buf, 0, Math.min(read, contentHead.remaining()));
}
length += read;
}
}
catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
return ORIGINAL_CONTENT.checkMatch(dataCrc, length, contentHead);
}
private Mono<Boolean> validateDownloadedContentsAsync(Path downloadPath, Context span) {
CRC32 dataCrc = new CRC32();
ByteBuffer contentHead = ByteBuffer.allocate(blobPrintableSize);
return BinaryData.fromFile(downloadPath).toFluxByteBuffer()
.map(bb -> {
long length = bb.remaining();
dataCrc.update(bb);
if (contentHead.hasRemaining()) {
bb.flip();
while (contentHead.hasRemaining() && bb.hasRemaining()) {
contentHead.put(bb.get());
}
}
return length;
})
.reduce(0L, Long::sum)
.map(l -> {
try(AutoCloseable scope = TELEMETRY_HELPER.getTracer().makeSpanCurrent(span)) {
return ORIGINAL_CONTENT.checkMatch(dataCrc, l, contentHead);
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize(), blobPrintableSize));
}
@Override
public Mono<Void> globalCleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.globalCleanupAsync());
}
private Path getTempPath(String prefix) {
try {
return Files.createTempDirectory(prefix);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
} | class DownloadToFile extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(DownloadToFile.class);
private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadToFile.class);
private final Path directoryPath;
private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncClient;
private final BlobAsyncClient asyncNoFaultClient;
@Override
protected boolean runInternal(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
try {
syncClient.downloadToFileWithResponse(blobOptions, Duration.ofSeconds(options.getDuration()), span);
return ORIGINAL_CONTENT.checkMatch(downloadPath, span).block();
} finally {
deleteFile(downloadPath);
}
}
@Override
protected Mono<Boolean> runInternalAsync(Context span) {
return Mono.using(
() -> directoryPath.resolve(UUID.randomUUID() + ".txt"),
path -> asyncClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(path.toString()))
.flatMap(ignored -> ORIGINAL_CONTENT.checkMatch(path, span)),
path -> deleteFile(path));
}
private static void deleteFile(Path path) {
try {
path.toFile().delete();
} catch (Exception e) {
LOGGER.atInfo()
.addKeyValue("path", path)
.log("failed to delete file", e);
}
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize()));
}
@Override
public Mono<Void> globalCleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.globalCleanupAsync());
}
private Path getTempPath(String prefix) {
try {
return Files.createTempDirectory(prefix);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
} |
Is `options.getSize()` tied to the configuration `sizeBytes`? If so, this will fail if/when huge blobs are tested (>2GB), or will be a performance issue unrelated to the SDK which might create stress testing noise when validating content. | public DownloadToFile(StorageStressOptions options) {
super(options, TELEMETRY_HELPER);
this.directoryPath = getTempPath("test");
this.blobPrintableSize = (int) Math.min(options.getSize(), 1024);
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName());
this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName());
this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName());
} | this.blobPrintableSize = (int) Math.min(options.getSize(), 1024); | public DownloadToFile(StorageStressOptions options) {
super(options, TELEMETRY_HELPER);
this.directoryPath = getTempPath("test");
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName());
this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName());
this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName());
} | class DownloadToFile extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(DownloadToFile.class);
private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadToFile.class.getName());
private final Path directoryPath;
private final int blobPrintableSize;
private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncClient;
private final BlobAsyncClient asyncNoFaultClient;
@Override
protected boolean runInternal(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
try {
syncClient.downloadToFileWithResponse(blobOptions, Duration.ofSeconds(options.getDuration()), span);
return validateDownloadedContents(downloadPath);
} finally {
deleteFile(downloadPath);
}
}
@Override
protected Mono<Boolean> runInternalAsync(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
return asyncClient
.downloadToFileWithResponse(blobOptions)
.flatMap(ignored -> validateDownloadedContentsAsync(downloadPath, span))
.doFinally(i -> deleteFile(downloadPath));
}
private static void deleteFile(Path path) {
try {
path.toFile().delete();
} catch (Exception e) {
LOGGER.atInfo()
.addKeyValue("path", path)
.log("failed to delete file", e);
}
}
private boolean validateDownloadedContents(Path downloadPath) {
long length = 0;
ByteBuffer contentHead = ByteBuffer.allocate(blobPrintableSize);
CRC32 dataCrc = new CRC32();
try (InputStream file = Files.newInputStream(downloadPath)) {
byte[] buf = new byte[4 * 1024 * 1024];
int read;
while ((read = file.read(buf)) != -1) {
dataCrc.update(buf, 0, read);
if (contentHead.hasRemaining()) {
contentHead.put(buf, 0, Math.min(read, contentHead.remaining()));
}
length += read;
}
}
catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
return ORIGINAL_CONTENT.checkMatch(dataCrc, length, contentHead);
}
private Mono<Boolean> validateDownloadedContentsAsync(Path downloadPath, Context span) {
CRC32 dataCrc = new CRC32();
ByteBuffer contentHead = ByteBuffer.allocate(blobPrintableSize);
return BinaryData.fromFile(downloadPath).toFluxByteBuffer()
.map(bb -> {
long length = bb.remaining();
dataCrc.update(bb);
if (contentHead.hasRemaining()) {
bb.flip();
while (contentHead.hasRemaining() && bb.hasRemaining()) {
contentHead.put(bb.get());
}
}
return length;
})
.reduce(0L, Long::sum)
.map(l -> {
try(AutoCloseable scope = TELEMETRY_HELPER.getTracer().makeSpanCurrent(span)) {
return ORIGINAL_CONTENT.checkMatch(dataCrc, l, contentHead);
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
});
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize(), blobPrintableSize));
}
@Override
public Mono<Void> globalCleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.globalCleanupAsync());
}
private Path getTempPath(String prefix) {
try {
return Files.createTempDirectory(prefix);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
} | class DownloadToFile extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(DownloadToFile.class);
private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadToFile.class);
private final Path directoryPath;
private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncClient;
private final BlobAsyncClient asyncNoFaultClient;
@Override
protected boolean runInternal(Context span) {
Path downloadPath = directoryPath.resolve(UUID.randomUUID() + ".txt");
BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(downloadPath.toString());
try {
syncClient.downloadToFileWithResponse(blobOptions, Duration.ofSeconds(options.getDuration()), span);
return ORIGINAL_CONTENT.checkMatch(downloadPath, span).block();
} finally {
deleteFile(downloadPath);
}
}
@Override
protected Mono<Boolean> runInternalAsync(Context span) {
return Mono.using(
() -> directoryPath.resolve(UUID.randomUUID() + ".txt"),
path -> asyncClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(path.toString()))
.flatMap(ignored -> ORIGINAL_CONTENT.checkMatch(path, span)),
path -> deleteFile(path));
}
private static void deleteFile(Path path) {
try {
path.toFile().delete();
} catch (Exception e) {
LOGGER.atInfo()
.addKeyValue("path", path)
.log("failed to delete file", e);
}
}
@Override
public Mono<Void> globalSetupAsync() {
return super.globalSetupAsync()
.then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize()));
}
@Override
public Mono<Void> globalCleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.globalCleanupAsync());
}
private Path getTempPath(String prefix) {
try {
return Files.createTempDirectory(prefix);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
} |
yep, no need - no fault client is used for setup/cleanup only and they are async | public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) {
super(options);
this.telemetryHelper = telemetryHelper;
String connectionString = options.getConnectionString();
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder()
.connectionString(connectionString)
.httpLogOptions(getLogOptions());
asyncNoFaultClient = clientBuilder.buildAsyncClient();
if (options.isFaultInjectionEnabled()) {
clientBuilder.httpClient(new HttpFaultInjectingHttpClient(
HttpClient.createDefault(), false, getFaultProbabilities()));
}
syncClient = clientBuilder.buildClient();
asyncClient = clientBuilder.buildAsyncClient();
asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME);
syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME);
asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME);
} | asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME); | public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) {
super(options);
this.telemetryHelper = telemetryHelper;
String connectionString = options.getConnectionString();
Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder()
.connectionString(connectionString)
.httpLogOptions(getLogOptions());
asyncNoFaultClient = clientBuilder.buildAsyncClient();
if (options.isFaultInjectionEnabled()) {
clientBuilder.httpClient(new HttpFaultInjectingHttpClient(
HttpClient.createDefault(), false, getFaultProbabilities()));
}
syncClient = clientBuilder.buildClient();
asyncClient = clientBuilder.buildAsyncClient();
asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME);
syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME);
asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME);
} | class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> {
private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID();
private final BlobServiceClient syncClient;
private final BlobServiceAsyncClient asyncClient;
private final BlobServiceAsyncClient asyncNoFaultClient;
private final BlobContainerClient syncContainerClient;
private final BlobContainerAsyncClient asyncContainerClient;
private final BlobContainerAsyncClient asyncNoFaultContainerClient;
private final TelemetryHelper telemetryHelper;
@Override
public Mono<Void> globalSetupAsync() {
telemetryHelper.logStart(options);
return super.globalSetupAsync()
.then(asyncNoFaultContainerClient.createIfNotExists())
.then();
}
@Override
public Mono<Void> globalCleanupAsync() {
telemetryHelper.logEnd();
return asyncNoFaultContainerClient.deleteIfExists()
.then(super.globalCleanupAsync());
}
@SuppressWarnings("try")
@Override
public void run() {
Context span = telemetryHelper.getTracer().start("run", Context.NONE);
try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
if (runInternal(span)) {
telemetryHelper.trackSuccess(span);
} else {
telemetryHelper.trackMismatch(span);
}
} catch (Exception e) {
if (e.getMessage().contains("Timeout on blocking read") || e instanceof InterruptedException || e instanceof TimeoutException) {
telemetryHelper.trackCancellation(span);
} else {
telemetryHelper.trackFailure(span, e);
}
}
}
@SuppressWarnings("try")
@Override
public Mono<Void> runAsync() {
Context span = telemetryHelper.getTracer().start("runAsync", Context.NONE);
try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
return runInternalAsync(span)
.doOnCancel(() -> telemetryHelper.trackCancellation(span))
.doOnError(e -> telemetryHelper.trackFailure(span, e))
.doOnNext(match -> {
if (match) {
telemetryHelper.trackSuccess(span);
} else {
telemetryHelper.trackMismatch(span);
}
})
.contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span))
.then()
.onErrorResume(e -> Mono.empty());
} catch (Exception e) {
return Mono.empty();
}
}
protected abstract boolean runInternal(Context context);
protected abstract Mono<Boolean> runInternalAsync(Context context);
protected BlobContainerClient getSyncContainerClient() {
return syncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClient() {
return asyncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClientNoFault() {
return asyncNoFaultContainerClient;
}
private static HttpLogOptions getLogOptions() {
return new HttpLogOptions()
.setLogLevel(HttpLogDetailLevel.HEADERS)
.addAllowedHeaderName("x-ms-faultinjector-response-option")
.addAllowedHeaderName("Content-Range")
.addAllowedHeaderName("Accept-Ranges")
.addAllowedHeaderName("x-ms-blob-content-md5")
.addAllowedHeaderName("x-ms-error-code")
.addAllowedHeaderName("x-ms-range");
}
private static FaultInjectionProbabilities getFaultProbabilities() {
return new FaultInjectionProbabilities()
.setNoResponseIndefinite(0.003D)
.setNoResponseClose(0.004D)
.setNoResponseAbort(0.003D)
.setPartialResponseIndefinite(0.06)
.setPartialResponseClose(0.06)
.setPartialResponseAbort(0.06)
.setPartialResponseFinishNormal(0.06);
}
} | class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> {
private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID();
private final BlobServiceClient syncClient;
private final BlobServiceAsyncClient asyncClient;
private final BlobServiceAsyncClient asyncNoFaultClient;
private final BlobContainerClient syncContainerClient;
private final BlobContainerAsyncClient asyncContainerClient;
private final BlobContainerAsyncClient asyncNoFaultContainerClient;
private final TelemetryHelper telemetryHelper;
@Override
public Mono<Void> globalSetupAsync() {
telemetryHelper.logStart(options);
return super.globalSetupAsync()
.then(asyncNoFaultContainerClient.createIfNotExists())
.then();
}
@Override
public Mono<Void> globalCleanupAsync() {
telemetryHelper.logEnd();
return asyncNoFaultContainerClient.deleteIfExists()
.then(super.globalCleanupAsync());
}
@SuppressWarnings("try")
@Override
public void run() {
Context span = telemetryHelper.getTracer().start("run", Context.NONE);
try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
if (runInternal(span)) {
telemetryHelper.trackSuccess(span);
} else {
telemetryHelper.trackMismatch(span);
}
} catch (Exception e) {
if (e.getMessage().contains("Timeout on blocking read") || e instanceof InterruptedException || e instanceof TimeoutException) {
telemetryHelper.trackCancellation(span);
} else {
telemetryHelper.trackFailure(span, e);
}
}
}
@SuppressWarnings("try")
@Override
public Mono<Void> runAsync() {
Context span = telemetryHelper.getTracer().start("runAsync", Context.NONE);
try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
return runInternalAsync(span)
.doOnCancel(() -> telemetryHelper.trackCancellation(span))
.doOnError(e -> telemetryHelper.trackFailure(span, e))
.doOnNext(match -> {
if (match) {
telemetryHelper.trackSuccess(span);
} else {
telemetryHelper.trackMismatch(span);
}
})
.contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span))
.then()
.onErrorResume(e -> Mono.empty());
} catch (Exception e) {
return Mono.empty();
}
}
protected abstract boolean runInternal(Context context);
protected abstract Mono<Boolean> runInternalAsync(Context context);
protected BlobContainerClient getSyncContainerClient() {
return syncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClient() {
return asyncContainerClient;
}
protected BlobContainerAsyncClient getAsyncContainerClientNoFault() {
return asyncNoFaultContainerClient;
}
private static HttpLogOptions getLogOptions() {
return new HttpLogOptions()
.setLogLevel(HttpLogDetailLevel.HEADERS)
.addAllowedHeaderName("x-ms-faultinjector-response-option")
.addAllowedHeaderName("Content-Range")
.addAllowedHeaderName("Accept-Ranges")
.addAllowedHeaderName("x-ms-blob-content-md5")
.addAllowedHeaderName("x-ms-error-code")
.addAllowedHeaderName("x-ms-range");
}
private static FaultInjectionProbabilities getFaultProbabilities() {
return new FaultInjectionProbabilities()
.setNoResponseIndefinite(0.003D)
.setNoResponseClose(0.004D)
.setNoResponseAbort(0.003D)
.setPartialResponseIndefinite(0.06)
.setPartialResponseClose(0.06)
.setPartialResponseAbort(0.06)
.setPartialResponseFinishNormal(0.06);
}
} |
Do we need this in test? If possible, don't disable anything about encryption. | public void tesCreateChaosTarget() {
Target target = null;
String kvName = "kv" + randomPadding();
try {
keyVaultManager.vaults()
.define(kvName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withRoleBasedAccessControl()
.withSku(SkuName.STANDARD)
.withDiskEncryptionDisabled()
.withTemplateDeploymentDisabled()
.create();
target = chaosManager.targets()
.createOrUpdate(
resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault",
new TargetInner()
.withLocation(REGION.name())
.withProperties(Collections.emptyMap())
);
Assertions.assertEquals(target.name(), "microsoft-keyvault");
Assertions.assertTrue(Objects.nonNull(chaosManager.targets().get(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName, "microsoft-keyvault")));
Assertions.assertTrue(chaosManager.targets().list(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName)
.stream().findAny().isPresent());
} finally {
if (target != null) {
chaosManager.targets().delete(resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault");
}
}
} | .withDiskEncryptionDisabled() | public void tesCreateChaosTarget() {
Target target = null;
String kvName = "kv" + randomPadding();
try {
keyVaultManager.vaults()
.define(kvName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withRoleBasedAccessControl()
.withSku(SkuName.STANDARD)
.create();
target = chaosManager.targets()
.createOrUpdate(
resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault",
new TargetInner()
.withLocation(REGION.name())
.withProperties(Collections.emptyMap())
);
Assertions.assertEquals(target.name(), "microsoft-keyvault");
Assertions.assertTrue(Objects.nonNull(chaosManager.targets().get(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName, "microsoft-keyvault")));
Assertions.assertTrue(chaosManager.targets().list(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName)
.stream().findAny().isPresent());
} finally {
if (target != null) {
chaosManager.targets().delete(resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault");
}
}
} | class ChaosManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ChaosManager chaosManager;
private KeyVaultManager keyVaultManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
chaosManager = ChaosManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
keyVaultManager = KeyVaultManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ChaosManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ChaosManager chaosManager;
private KeyVaultManager keyVaultManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
chaosManager = ChaosManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
keyVaultManager = KeyVaultManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Fixed in the new version. | public void tesCreateChaosTarget() {
Target target = null;
String kvName = "kv" + randomPadding();
try {
keyVaultManager.vaults()
.define(kvName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withRoleBasedAccessControl()
.withSku(SkuName.STANDARD)
.withDiskEncryptionDisabled()
.withTemplateDeploymentDisabled()
.create();
target = chaosManager.targets()
.createOrUpdate(
resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault",
new TargetInner()
.withLocation(REGION.name())
.withProperties(Collections.emptyMap())
);
Assertions.assertEquals(target.name(), "microsoft-keyvault");
Assertions.assertTrue(Objects.nonNull(chaosManager.targets().get(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName, "microsoft-keyvault")));
Assertions.assertTrue(chaosManager.targets().list(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName)
.stream().findAny().isPresent());
} finally {
if (target != null) {
chaosManager.targets().delete(resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault");
}
}
} | .withDiskEncryptionDisabled() | public void tesCreateChaosTarget() {
Target target = null;
String kvName = "kv" + randomPadding();
try {
keyVaultManager.vaults()
.define(kvName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroupName)
.withRoleBasedAccessControl()
.withSku(SkuName.STANDARD)
.create();
target = chaosManager.targets()
.createOrUpdate(
resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault",
new TargetInner()
.withLocation(REGION.name())
.withProperties(Collections.emptyMap())
);
Assertions.assertEquals(target.name(), "microsoft-keyvault");
Assertions.assertTrue(Objects.nonNull(chaosManager.targets().get(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName, "microsoft-keyvault")));
Assertions.assertTrue(chaosManager.targets().list(
resourceGroupName, "microsoft.keyvault",
"vaults", kvName)
.stream().findAny().isPresent());
} finally {
if (target != null) {
chaosManager.targets().delete(resourceGroupName,
"microsoft.keyvault",
"vaults",
kvName,
"microsoft-keyvault");
}
}
} | class ChaosManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ChaosManager chaosManager;
private KeyVaultManager keyVaultManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
chaosManager = ChaosManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
keyVaultManager = KeyVaultManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class ChaosManagerTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_EAST;
private String resourceGroupName = "rg" + randomPadding();
private ChaosManager chaosManager;
private KeyVaultManager keyVaultManager;
private ResourceManager resourceManager;
private boolean testEnv;
@Override
public void beforeTest() {
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
chaosManager = ChaosManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
.authenticate(credential, profile);
keyVaultManager = KeyVaultManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile);
resourceManager = ResourceManager
.configure()
.withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(credential, profile)
.withDefaultSubscription();
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroupName = testResourceGroup;
} else {
resourceManager.resourceGroups()
.define(resourceGroupName)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
resourceManager.resourceGroups().beginDeleteByName(resourceGroupName);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
nice 👍 | public void shouldMonitor() throws InterruptedException, MalformedURLException {
SpringMonitorTest.class.getResourceAsStream("/logback.xml");
String response = restTemplate.getForObject(Controller.URL, String.class);
assertThat(response).isEqualTo("OK!");
countDownLatch.await(10, SECONDS);
assertThat(customValidationPolicy.url)
.isEqualTo(new URL("https:
List<TelemetryItem> telemetryItems = getTelemetryItems();
List<TelemetryItem> logs;
List<TelemetryItem> remoteDependencies;
List<TelemetryItem> requests;
long start = System.currentTimeMillis();
boolean found;
do {
telemetryItems = getTelemetryItems();
logs = getItemsForType(telemetryItems, "Message");
remoteDependencies = getItemsForType(telemetryItems, "RemoteDependency");
requests = getItemsForType(telemetryItems, "Request");
found = !logs.isEmpty() && !remoteDependencies.isEmpty() && !requests.isEmpty();
}
while (!found && System.currentTimeMillis() - start < SECONDS.toMillis(10));
TelemetryItem firstLogTelemetry = logs.get(0);
MonitorDomain logBaseData = firstLogTelemetry.getData().getBaseData();
MessageData logData = (MessageData) logBaseData;
assertThat(logData.getMessage()).startsWith("Starting SpringMonitorTest using");
assertThat(logData.getSeverityLevel()).isEqualTo(SeverityLevel.INFORMATION);
TelemetryItem remoteDependency = remoteDependencies.get(0);
MonitorDomain remoteBaseData = remoteDependency.getData().getBaseData();
RemoteDependencyData remoteDependencyData = (RemoteDependencyData) remoteBaseData;
assertThat(remoteDependencyData.getType()).isEqualTo("SQL");
assertThat(remoteDependencyData.getData())
.isEqualTo("create table test_table (id bigint not null, primary key (id))");
TelemetryItem request = requests.get(0);
MonitorDomain requestBaseData = request.getData().getBaseData();
RequestData requestData = (RequestData) requestBaseData;
assertThat(requestData.getUrl()).contains(Controller.URL);
assertThat(requestData.isSuccess()).isTrue();
assertThat(requestData.getResponseCode()).isEqualTo("200");
assertThat(requestData.getName()).isEqualTo("GET /controller-url");
} | assertThat(logData.getMessage()).startsWith("Starting SpringMonitorTest using"); | public void shouldMonitor() throws InterruptedException, MalformedURLException {
SpringMonitorTest.class.getResourceAsStream("/logback.xml");
String response = restTemplate.getForObject(Controller.URL, String.class);
assertThat(response).isEqualTo("OK!");
countDownLatch.await(10, SECONDS);
assertThat(customValidationPolicy.url)
.isEqualTo(new URL("https:
List<TelemetryItem> telemetryItems = getTelemetryItems();
List<TelemetryItem> logs;
List<TelemetryItem> remoteDependencies;
List<TelemetryItem> requests;
long start = System.currentTimeMillis();
boolean found;
do {
telemetryItems = getTelemetryItems();
logs = getItemsForType(telemetryItems, "Message");
remoteDependencies = getItemsForType(telemetryItems, "RemoteDependency");
requests = getItemsForType(telemetryItems, "Request");
found = !logs.isEmpty() && !remoteDependencies.isEmpty() && !requests.isEmpty();
}
while (!found && System.currentTimeMillis() - start < SECONDS.toMillis(10));
TelemetryItem firstLogTelemetry = logs.get(0);
MonitorDomain logBaseData = firstLogTelemetry.getData().getBaseData();
MessageData logData = (MessageData) logBaseData;
assertThat(logData.getMessage()).startsWith("Starting SpringMonitorTest using");
assertThat(logData.getSeverityLevel()).isEqualTo(SeverityLevel.INFORMATION);
TelemetryItem remoteDependency = remoteDependencies.get(0);
MonitorDomain remoteBaseData = remoteDependency.getData().getBaseData();
RemoteDependencyData remoteDependencyData = (RemoteDependencyData) remoteBaseData;
assertThat(remoteDependencyData.getType()).isEqualTo("SQL");
assertThat(remoteDependencyData.getData())
.isEqualTo("create table test_table (id bigint not null, primary key (id))");
TelemetryItem request = requests.get(0);
MonitorDomain requestBaseData = request.getData().getBaseData();
RequestData requestData = (RequestData) requestBaseData;
assertThat(requestData.getUrl()).contains(Controller.URL);
assertThat(requestData.isSuccess()).isTrue();
assertThat(requestData.getResponseCode()).isEqualTo("200");
assertThat(requestData.getName()).isEqualTo("GET /controller-url");
} | class TestConfig {
@Bean
HttpPipeline httpPipeline() {
countDownLatch = new CountDownLatch(2);
customValidationPolicy = new CustomValidationPolicy(countDownLatch);
return getHttpPipeline(customValidationPolicy);
}
HttpPipeline getHttpPipeline(@Nullable HttpPipelinePolicy policy) {
return new HttpPipelineBuilder()
.httpClient(HttpClient.createDefault())
.policies(policy)
.build();
}
@Bean
@Primary
SelfDiagnosticsLevel testSelfDiagnosticsLevel() {
return SelfDiagnosticsLevel.DEBUG;
}
} | class TestConfig {
@Bean
HttpPipeline httpPipeline() {
countDownLatch = new CountDownLatch(2);
customValidationPolicy = new CustomValidationPolicy(countDownLatch);
return getHttpPipeline(customValidationPolicy);
}
HttpPipeline getHttpPipeline(@Nullable HttpPipelinePolicy policy) {
return new HttpPipelineBuilder()
.httpClient(HttpClient.createDefault())
.policies(policy)
.build();
}
@Bean
@Primary
SelfDiagnosticsLevel testSelfDiagnosticsLevel() {
return SelfDiagnosticsLevel.DEBUG;
}
} |
```suggestion ``` | OpenTelemetryInjector injectOtelIntoJdbcDriver() {
System.out.println("AzureJdbcDriverAutoConfiguration.injectOtelIntoJdbcDriver");
return openTelemetry -> OpenTelemetryDriver.install(openTelemetry);
} | System.out.println("AzureJdbcDriverAutoConfiguration.injectOtelIntoJdbcDriver"); | OpenTelemetryInjector injectOtelIntoJdbcDriver() {
return openTelemetry -> OpenTelemetryDriver.install(openTelemetry);
} | class AzureJdbcDriverAutoConfiguration {
@Bean
@Bean
BeanFactoryPostProcessor openTelemetryBeanCreatedBeforeDatasourceBean() {
return configurableBeanFactory -> {
BeanDefinition dataSourceBean = configurableBeanFactory.getBeanDefinition("dataSource");
dataSourceBean.setDependsOn("openTelemetry");
};
}
} | class AzureJdbcDriverAutoConfiguration {
@Bean
@Bean
BeanFactoryPostProcessor openTelemetryBeanCreatedBeforeDatasourceBean() {
return configurableBeanFactory -> {
BeanDefinition dataSourceBean = configurableBeanFactory.getBeanDefinition("dataSource");
dataSourceBean.setDependsOn("openTelemetry");
};
}
} |
I dont think we use this path anymore :-)! There are various other composite continuation tokens which could mark the continuation token as clientSideContinuationTokens. | private static boolean isClientSideContinuationToken(String continuationToken) {
if (continuationToken != null) {
ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<>();
if (CompositeContinuationToken.tryParse(continuationToken, outCompositeContinuationToken)) {
return true;
}
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<>();
if (OrderByContinuationToken.tryParse(continuationToken, outOrderByContinuationToken)) {
return true;
}
ValueHolder<TopContinuationToken> outTopContinuationToken = new ValueHolder<>();
if (TopContinuationToken.tryParse(continuationToken, outTopContinuationToken)) {
return true;
}
ValueHolder<LimitContinuationToken> outLimitContinuationToken = new ValueHolder<>();
return LimitContinuationToken.tryParse(continuationToken, outLimitContinuationToken);
}
return false;
} | ValueHolder<LimitContinuationToken> outLimitContinuationToken = new ValueHolder<>(); | private static boolean isClientSideContinuationToken(String continuationToken) {
if (continuationToken != null) {
ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<>();
if (CompositeContinuationToken.tryParse(continuationToken, outCompositeContinuationToken)) {
return true;
}
ValueHolder<OrderByContinuationToken> outOrderByContinuationToken = new ValueHolder<>();
if (OrderByContinuationToken.tryParse(continuationToken, outOrderByContinuationToken)) {
return true;
}
ValueHolder<TopContinuationToken> outTopContinuationToken = new ValueHolder<>();
if (TopContinuationToken.tryParse(continuationToken, outTopContinuationToken)) {
return true;
}
ValueHolder<LimitContinuationToken> outLimitContinuationToken = new ValueHolder<>();
return LimitContinuationToken.tryParse(continuationToken, outLimitContinuationToken);
}
return false;
} | class DefaultDocumentQueryExecutionContext<T> extends DocumentQueryExecutionContextBase<T> {
private final AtomicInteger retries = new AtomicInteger(-1);
private final SchedulingStopwatch fetchSchedulingMetrics;
private final FetchExecutionRangeAccumulator fetchExecutionRangeAccumulator;
private static final String DEFAULT_PARTITION_RANGE = "00-FF";
private final Function<JsonNode, T> factoryMethod;
public DefaultDocumentQueryExecutionContext(DiagnosticsClientContext diagnosticsClientContext, IDocumentQueryClient client, ResourceType resourceTypeEnum,
Class<T> resourceType, SqlQuerySpec query, CosmosQueryRequestOptions cosmosQueryRequestOptions, String resourceLink,
UUID correlatedActivityId, final AtomicBoolean isQueryCancelledOnTimeout) {
super(diagnosticsClientContext, client,
resourceTypeEnum,
resourceType,
query,
cosmosQueryRequestOptions,
resourceLink,
correlatedActivityId,
isQueryCancelledOnTimeout);
this.fetchSchedulingMetrics = new SchedulingStopwatch();
this.fetchSchedulingMetrics.ready();
this.fetchExecutionRangeAccumulator = new FetchExecutionRangeAccumulator(DEFAULT_PARTITION_RANGE);
this.factoryMethod = DocumentQueryExecutionContextBase.getEffectiveFactoryMethod(
cosmosQueryRequestOptions,
false,
resourceType);
}
protected PartitionKeyInternal getPartitionKeyInternal() {
return this.cosmosQueryRequestOptions.getPartitionKey() == null ? null : BridgeInternal.getPartitionKeyInternal(cosmosQueryRequestOptions.getPartitionKey());
}
@Override
public Flux<FeedResponse<T>> executeAsync() {
if (cosmosQueryRequestOptions == null) {
cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
}
CosmosQueryRequestOptions newCosmosQueryRequestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.clone(cosmosQueryRequestOptions);
String originalContinuation = ModelBridgeInternal.getRequestContinuationFromQueryRequestOptions(newCosmosQueryRequestOptions);
if (isClientSideContinuationToken(originalContinuation)) {
ModelBridgeInternal.setQueryRequestOptionsContinuationToken(newCosmosQueryRequestOptions, null);
newCosmosQueryRequestOptions.setMaxDegreeOfParallelism(Integer.MAX_VALUE);
}
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(newCosmosQueryRequestOptions);
int maxPageSize = maxItemCount != null ? maxItemCount : Constants.Properties.DEFAULT_MAX_PAGE_SIZE;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = this::createRequestAsync;
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = executeInternalAsyncFunc();
return Paginator
.getPaginatedQueryResultAsObservable(
newCosmosQueryRequestOptions, createRequestFunc, executeFunc, maxPageSize);
}
public Mono<List<PartitionKeyRange>> getTargetPartitionKeyRanges(String resourceId, List<Range<String>> queryRanges) {
return RoutingMapProviderHelper.getOverlappingRanges(client.getPartitionKeyRangeCache(), resourceId, queryRanges);
}
public Mono<Range<String>> getTargetRange(String collectionRid, FeedRangeInternal feedRangeInternal) {
return feedRangeInternal.getNormalizedEffectiveRange(client.getPartitionKeyRangeCache(),
/*metadataDiagnosticsCtx*/null,
this.client.getCollectionCache().resolveByRidAsync(
/*metadataDiagnosticsCtx*/ null,
collectionRid,
/*properties*/null));
}
public Mono<List<PartitionKeyRange>> getTargetPartitionKeyRangesById(String resourceId,
String partitionKeyRangeIdInternal) {
return client.getPartitionKeyRangeCache()
.tryGetPartitionKeyRangeByIdAsync(null,
resourceId,
partitionKeyRangeIdInternal,
false,
null)
.flatMap(partitionKeyRange -> Mono.just(Collections.singletonList(partitionKeyRange.v)));
}
private DocumentClientRetryPolicy createClientRetryPolicyInstance() {
RxCollectionCache collectionCache = this.client.getCollectionCache();
IPartitionKeyRangeCache partitionKeyRangeCache = this.client.getPartitionKeyRangeCache();
DocumentClientRetryPolicy retryPolicyInstance = this.client.getResetSessionTokenRetryPolicy().getRequestPolicy(this.diagnosticsClientContext);
retryPolicyInstance = new InvalidPartitionExceptionRetryPolicy(
collectionCache,
retryPolicyInstance,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(this.cosmosQueryRequestOptions));
if (super.resourceTypeEnum.isPartitioned()) {
retryPolicyInstance = new PartitionKeyRangeGoneRetryPolicy(this.diagnosticsClientContext,
collectionCache,
partitionKeyRangeCache,
PathsHelper.getCollectionPath(super.resourceLink),
retryPolicyInstance,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(this.cosmosQueryRequestOptions));
}
return retryPolicyInstance;
}
protected Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeInternalAsyncFunc() {
return req -> this.client.executeFeedOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Query,
this::createClientRetryPolicyInstance,
req,
this::executeInternalFuncCore);
}
private Mono<FeedResponse<T>> executeInternalFuncCore(
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req
) {
DocumentClientRetryPolicy finalRetryPolicyInstance = retryPolicyFactory.get();
finalRetryPolicyInstance.onBeforeSendRequest(req);
this.fetchExecutionRangeAccumulator.beginFetchRange();
this.fetchSchedulingMetrics.start();
return BackoffRetryUtility.executeRetry(() -> {
this.retries.incrementAndGet();
return executeRequestAsync(
this.factoryMethod,
req);
}, finalRetryPolicyInstance)
.map(tFeedResponse -> {
this.fetchSchedulingMetrics.stop();
this.fetchExecutionRangeAccumulator.endFetchRange(tFeedResponse.getActivityId(),
tFeedResponse.getResults().size(),
this.retries.get());
ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
new ImmutablePair<>(DEFAULT_PARTITION_RANGE, this.fetchSchedulingMetrics.getElapsedTime());
if (!StringUtils.isEmpty(tFeedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS))) {
QueryMetrics qm =
BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(tFeedResponse.getResponseHeaders()
.get(HttpConstants.HttpHeaders.QUERY_METRICS),
new ClientSideMetrics(this.retries.get(),
tFeedResponse.getRequestCharge(),
this.fetchExecutionRangeAccumulator.getExecutionRanges(),
Collections.singletonList(schedulingTimeSpanMap)),
tFeedResponse.getActivityId(),
tFeedResponse.getResponseHeaders().getOrDefault(HttpConstants.HttpHeaders.INDEX_UTILIZATION, null));
String pkrId = tFeedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
String queryMetricKey = DEFAULT_PARTITION_RANGE + ",pkrId:" + pkrId;
BridgeInternal.putQueryMetricsIntoMap(tFeedResponse, queryMetricKey, qm);
}
return tFeedResponse;
});
}
public RxDocumentServiceRequest createRequestAsync(String continuationToken, Integer maxPageSize) {
Map<String, String> requestHeaders = this.createCommonHeadersAsync(
this.getFeedOptions(continuationToken, maxPageSize));
RxDocumentServiceRequest request = this.createDocumentServiceRequest(
requestHeaders,
this.query,
this.getPartitionKeyInternal());
if (!StringUtils.isEmpty(getPartitionKeyRangeIdInternal(cosmosQueryRequestOptions))) {
request.routeTo(new PartitionKeyRangeIdentity(getPartitionKeyRangeIdInternal(cosmosQueryRequestOptions)));
}
return request;
}
} | class DefaultDocumentQueryExecutionContext<T> extends DocumentQueryExecutionContextBase<T> {
private final AtomicInteger retries = new AtomicInteger(-1);
private final SchedulingStopwatch fetchSchedulingMetrics;
private final FetchExecutionRangeAccumulator fetchExecutionRangeAccumulator;
private static final String DEFAULT_PARTITION_RANGE = "00-FF";
private final Function<JsonNode, T> factoryMethod;
public DefaultDocumentQueryExecutionContext(DiagnosticsClientContext diagnosticsClientContext, IDocumentQueryClient client, ResourceType resourceTypeEnum,
Class<T> resourceType, SqlQuerySpec query, CosmosQueryRequestOptions cosmosQueryRequestOptions, String resourceLink,
UUID correlatedActivityId, final AtomicBoolean isQueryCancelledOnTimeout) {
super(diagnosticsClientContext, client,
resourceTypeEnum,
resourceType,
query,
cosmosQueryRequestOptions,
resourceLink,
correlatedActivityId,
isQueryCancelledOnTimeout);
this.fetchSchedulingMetrics = new SchedulingStopwatch();
this.fetchSchedulingMetrics.ready();
this.fetchExecutionRangeAccumulator = new FetchExecutionRangeAccumulator(DEFAULT_PARTITION_RANGE);
this.factoryMethod = DocumentQueryExecutionContextBase.getEffectiveFactoryMethod(
cosmosQueryRequestOptions,
false,
resourceType);
}
protected PartitionKeyInternal getPartitionKeyInternal() {
return this.cosmosQueryRequestOptions.getPartitionKey() == null ? null : BridgeInternal.getPartitionKeyInternal(cosmosQueryRequestOptions.getPartitionKey());
}
@Override
public Flux<FeedResponse<T>> executeAsync() {
if (cosmosQueryRequestOptions == null) {
cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
}
CosmosQueryRequestOptions newCosmosQueryRequestOptions = ImplementationBridgeHelpers
.CosmosQueryRequestOptionsHelper
.getCosmosQueryRequestOptionsAccessor()
.clone(cosmosQueryRequestOptions);
String originalContinuation = ModelBridgeInternal.getRequestContinuationFromQueryRequestOptions(newCosmosQueryRequestOptions);
if (isClientSideContinuationToken(originalContinuation)) {
ModelBridgeInternal.setQueryRequestOptionsContinuationToken(newCosmosQueryRequestOptions, null);
newCosmosQueryRequestOptions.setMaxDegreeOfParallelism(Integer.MAX_VALUE);
}
Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(newCosmosQueryRequestOptions);
int maxPageSize = maxItemCount != null ? maxItemCount : Constants.Properties.DEFAULT_MAX_PAGE_SIZE;
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = this::createRequestAsync;
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = executeInternalAsyncFunc();
return Paginator
.getPaginatedQueryResultAsObservable(
newCosmosQueryRequestOptions, createRequestFunc, executeFunc, maxPageSize);
}
public Mono<List<PartitionKeyRange>> getTargetPartitionKeyRanges(String resourceId, List<Range<String>> queryRanges) {
return RoutingMapProviderHelper.getOverlappingRanges(client.getPartitionKeyRangeCache(), resourceId, queryRanges);
}
public Mono<Range<String>> getTargetRange(String collectionRid, FeedRangeInternal feedRangeInternal) {
return feedRangeInternal.getNormalizedEffectiveRange(client.getPartitionKeyRangeCache(),
/*metadataDiagnosticsCtx*/null,
this.client.getCollectionCache().resolveByRidAsync(
/*metadataDiagnosticsCtx*/ null,
collectionRid,
/*properties*/null));
}
public Mono<List<PartitionKeyRange>> getTargetPartitionKeyRangesById(String resourceId,
String partitionKeyRangeIdInternal) {
return client.getPartitionKeyRangeCache()
.tryGetPartitionKeyRangeByIdAsync(null,
resourceId,
partitionKeyRangeIdInternal,
false,
null)
.flatMap(partitionKeyRange -> Mono.just(Collections.singletonList(partitionKeyRange.v)));
}
private DocumentClientRetryPolicy createClientRetryPolicyInstance() {
RxCollectionCache collectionCache = this.client.getCollectionCache();
IPartitionKeyRangeCache partitionKeyRangeCache = this.client.getPartitionKeyRangeCache();
DocumentClientRetryPolicy retryPolicyInstance = this.client.getResetSessionTokenRetryPolicy().getRequestPolicy(this.diagnosticsClientContext);
retryPolicyInstance = new InvalidPartitionExceptionRetryPolicy(
collectionCache,
retryPolicyInstance,
resourceLink,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(this.cosmosQueryRequestOptions));
if (super.resourceTypeEnum.isPartitioned()) {
retryPolicyInstance = new PartitionKeyRangeGoneRetryPolicy(this.diagnosticsClientContext,
collectionCache,
partitionKeyRangeCache,
PathsHelper.getCollectionPath(super.resourceLink),
retryPolicyInstance,
ModelBridgeInternal.getPropertiesFromQueryRequestOptions(this.cosmosQueryRequestOptions));
}
return retryPolicyInstance;
}
protected Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeInternalAsyncFunc() {
return req -> this.client.executeFeedOperationWithAvailabilityStrategy(
ResourceType.Document,
OperationType.Query,
this::createClientRetryPolicyInstance,
req,
this::executeInternalFuncCore);
}
private Mono<FeedResponse<T>> executeInternalFuncCore(
final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
final RxDocumentServiceRequest req
) {
DocumentClientRetryPolicy finalRetryPolicyInstance = retryPolicyFactory.get();
finalRetryPolicyInstance.onBeforeSendRequest(req);
this.fetchExecutionRangeAccumulator.beginFetchRange();
this.fetchSchedulingMetrics.start();
return BackoffRetryUtility.executeRetry(() -> {
this.retries.incrementAndGet();
return executeRequestAsync(
this.factoryMethod,
req);
}, finalRetryPolicyInstance)
.map(tFeedResponse -> {
this.fetchSchedulingMetrics.stop();
this.fetchExecutionRangeAccumulator.endFetchRange(tFeedResponse.getActivityId(),
tFeedResponse.getResults().size(),
this.retries.get());
ImmutablePair<String, SchedulingTimeSpan> schedulingTimeSpanMap =
new ImmutablePair<>(DEFAULT_PARTITION_RANGE, this.fetchSchedulingMetrics.getElapsedTime());
if (!StringUtils.isEmpty(tFeedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.QUERY_METRICS))) {
QueryMetrics qm =
BridgeInternal.createQueryMetricsFromDelimitedStringAndClientSideMetrics(tFeedResponse.getResponseHeaders()
.get(HttpConstants.HttpHeaders.QUERY_METRICS),
new ClientSideMetrics(this.retries.get(),
tFeedResponse.getRequestCharge(),
this.fetchExecutionRangeAccumulator.getExecutionRanges(),
Collections.singletonList(schedulingTimeSpanMap)),
tFeedResponse.getActivityId(),
tFeedResponse.getResponseHeaders().getOrDefault(HttpConstants.HttpHeaders.INDEX_UTILIZATION, null));
String pkrId = tFeedResponse.getResponseHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
String queryMetricKey = DEFAULT_PARTITION_RANGE + ",pkrId:" + pkrId;
BridgeInternal.putQueryMetricsIntoMap(tFeedResponse, queryMetricKey, qm);
}
return tFeedResponse;
});
}
public RxDocumentServiceRequest createRequestAsync(String continuationToken, Integer maxPageSize) {
Map<String, String> requestHeaders = this.createCommonHeadersAsync(
this.getFeedOptions(continuationToken, maxPageSize));
RxDocumentServiceRequest request = this.createDocumentServiceRequest(
requestHeaders,
this.query,
this.getPartitionKeyInternal());
if (!StringUtils.isEmpty(getPartitionKeyRangeIdInternal(cosmosQueryRequestOptions))) {
request.routeTo(new PartitionKeyRangeIdentity(getPartitionKeyRangeIdInternal(cosmosQueryRequestOptions)));
}
return request;
}
} |
I think this should have a try/catch as well where a clear `IllegalStateException` is thrown explaining the issue. This may help us in the future if this begins to return another format such as an ISO-8601 datestring (yyyy-MM-ddThh:mm:ss) | static OffsetDateTime parseWindowsFileTimeOrDateString(String date) {
try {
return fromWindowsFileTimeOrNull(Long.parseLong(date));
} catch (NumberFormatException ex) {
return parseDateOrNull(date);
}
} | return parseDateOrNull(date); | static OffsetDateTime parseWindowsFileTimeOrDateString(String date) {
if (date == null) {
return null;
}
try {
return fromWindowsFileTimeOrNull(Long.parseLong(date));
} catch (Exception ex) {
if (ex instanceof NumberFormatException) {
return parseDateOrNull(date);
}
throw LOGGER.logExceptionAsError(new RuntimeException("Failed to parse date string: " + date, ex));
}
} | class Transforms {
private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or "
+ "%s.", FileQueryJsonSerialization.class.getSimpleName(),
FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(),
FileQueryParquetSerialization.class.getSimpleName());
private static final long EPOCH_CONVERSION;
public static final HttpHeaderName X_MS_ENCRYPTION_CONTEXT = HttpHeaderName.fromString("x-ms-encryption-context");
public static final HttpHeaderName X_MS_OWNER = HttpHeaderName.fromString("x-ms-owner");
public static final HttpHeaderName X_MS_GROUP = HttpHeaderName.fromString("x-ms-group");
public static final HttpHeaderName X_MS_PERMISSIONS = HttpHeaderName.fromString("x-ms-permissions");
public static final HttpHeaderName X_MS_CONTINUATION = HttpHeaderName.fromString("x-ms-continuation");
static {
GregorianCalendar unixEpoch = new GregorianCalendar();
unixEpoch.clear();
unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0);
GregorianCalendar windowsEpoch = new GregorianCalendar();
windowsEpoch.clear();
windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0);
EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis();
}
static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType
fileSystemPublicAccessType) {
if (fileSystemPublicAccessType == null) {
return null;
}
return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString());
}
private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType
blobLeaseDurationType) {
if (blobLeaseDurationType == null) {
return null;
}
return LeaseDurationType.fromString(blobLeaseDurationType.toString());
}
private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType
blobLeaseStateType) {
if (blobLeaseStateType == null) {
return null;
}
return LeaseStateType.fromString(blobLeaseStateType.toString());
}
private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType
blobLeaseStatusType) {
if (blobLeaseStatusType == null) {
return null;
}
return LeaseStatusType.fromString(blobLeaseStatusType.toString());
}
private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType
blobPublicAccessType) {
if (blobPublicAccessType == null) {
return null;
}
return PublicAccessType.fromString(blobPublicAccessType.toString());
}
private static CopyStatusType toDataLakeCopyStatusType(
com.azure.storage.blob.models.CopyStatusType blobCopyStatus) {
if (blobCopyStatus == null) {
return null;
}
return CopyStatusType.fromString(blobCopyStatus.toString());
}
private static ArchiveStatus toDataLakeArchiveStatus(
com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) {
if (blobArchiveStatus == null) {
return null;
}
return ArchiveStatus.fromString(blobArchiveStatus.toString());
}
private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) {
if (blobAccessTier == null) {
return null;
}
return AccessTier.fromString(blobAccessTier.toString());
}
static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) {
if (blobContainerProperties == null) {
return null;
}
FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(),
blobContainerProperties.getLastModified(),
Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()),
Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()),
Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()),
Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()),
blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold());
return AccessorUtility.getFileSystemPropertiesAccessor()
.setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(),
blobContainerProperties.isEncryptionScopeOverridePrevented());
}
private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) {
if (fileSystemListDetails == null) {
return null;
}
return new BlobContainerListDetails()
.setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata())
.setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted())
.setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems());
}
static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) {
if (listFileSystemsOptions == null) {
return null;
}
return new ListBlobContainersOptions()
.setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails()))
.setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage())
.setPrefix(listFileSystemsOptions.getPrefix());
}
static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey
blobUserDelegationKey) {
if (blobUserDelegationKey == null) {
return null;
}
return new UserDelegationKey()
.setSignedExpiry(blobUserDelegationKey.getSignedExpiry())
.setSignedObjectId(blobUserDelegationKey.getSignedObjectId())
.setSignedTenantId(blobUserDelegationKey.getSignedTenantId())
.setSignedService(blobUserDelegationKey.getSignedService())
.setSignedStart(blobUserDelegationKey.getSignedStart())
.setSignedVersion(blobUserDelegationKey.getSignedVersion())
.setValue(blobUserDelegationKey.getValue());
}
static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) {
if (pathHTTPHeaders == null) {
return null;
}
return new BlobHttpHeaders()
.setCacheControl(pathHTTPHeaders.getCacheControl())
.setContentDisposition(pathHTTPHeaders.getContentDisposition())
.setContentEncoding(pathHTTPHeaders.getContentEncoding())
.setContentLanguage(pathHTTPHeaders.getContentLanguage())
.setContentType(pathHTTPHeaders.getContentType())
.setContentMd5(pathHTTPHeaders.getContentMd5());
}
static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) {
if (options == null) {
return null;
}
return new BlobInputStreamOptions()
.setBlockSize(options.getBlockSize())
.setRange(toBlobRange(options.getRange()))
.setRequestConditions(toBlobRequestConditions(options.getRequestConditions()))
.setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl()));
}
static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl(
com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) {
if (datalakeConsistentReadControl == null) {
return null;
}
switch (datalakeConsistentReadControl) {
case NONE:
return ConsistentReadControl.NONE;
case ETAG:
return ConsistentReadControl.ETAG;
default:
throw new IllegalArgumentException("Could not convert ConsistentReadControl");
}
}
static BlobRange toBlobRange(FileRange fileRange) {
if (fileRange == null) {
return null;
}
return new BlobRange(fileRange.getOffset(), fileRange.getCount());
}
/**
 * Converts DataLake {@code DownloadRetryOptions} into the blob-side equivalent.
 *
 * @param dataLakeOptions the DataLake options, may be null.
 * @return the equivalent blob options, or null when the input is null.
 */
static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions(
    DownloadRetryOptions dataLakeOptions) {
    return dataLakeOptions == null
        ? null
        : new com.azure.storage.blob.models.DownloadRetryOptions()
            .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests());
}
static PathProperties toPathProperties(BlobProperties properties) {
return toPathProperties(properties, null);
}
/**
 * Converts blob {@code BlobProperties} into DataLake {@code PathProperties}, optionally enriching
 * the result with DataLake-specific values read from the raw response headers.
 *
 * @param properties the blob properties, may be null.
 * @param r the raw response whose headers carry encryption context / POSIX access info; may be null,
 *     in which case only the blob-derived properties are populated.
 * @return the converted properties, or null when {@code properties} is null.
 */
static PathProperties toPathProperties(BlobProperties properties, Response<?> r) {
    if (properties == null) {
        return null;
    } else {
        // NOTE: PathProperties has a large positional constructor; argument order here must match
        // its declaration exactly — do not reorder these expressions.
        PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(),
            properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(),
            properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(),
            properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()),
            Transforms.toDataLakeLeaseStateType(properties.getLeaseState()),
            Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(),
            Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(),
            properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(),
            properties.isServerEncrypted(), properties.isIncrementalCopy(),
            Transforms.toDataLakeAccessTier(properties.getAccessTier()),
            Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(),
            properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn());
        if (r == null) {
            return pathProperties;
        } else {
            // These DataLake-only values are not modeled on BlobProperties; they are surfaced via
            // raw x-ms-* response headers and attached through the package-private accessor.
            String encryptionContext = r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT);
            String owner = r.getHeaders().getValue(X_MS_OWNER);
            String group = r.getHeaders().getValue(X_MS_GROUP);
            String permissions = r.getHeaders().getValue(X_MS_PERMISSIONS);
            return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties,
                properties.getEncryptionScope(), encryptionContext, owner, group, permissions);
        }
    }
}
static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) {
if (blobContainerItem == null) {
return null;
}
return new FileSystemItem()
.setName(blobContainerItem.getName())
.setDeleted(blobContainerItem.isDeleted())
.setVersion(blobContainerItem.getVersion())
.setMetadata(blobContainerItem.getMetadata())
.setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties()));
}
private static FileSystemItemProperties toFileSystemItemProperties(
BlobContainerItemProperties blobContainerItemProperties) {
if (blobContainerItemProperties == null) {
return null;
}
return new FileSystemItemProperties()
.setETag(blobContainerItemProperties.getETag())
.setLastModified(blobContainerItemProperties.getLastModified())
.setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus()))
.setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState()))
.setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration()))
.setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess()))
.setHasLegalHold(blobContainerItemProperties.isHasLegalHold())
.setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy())
.setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope())
.setEncryptionScopeOverridePrevented(blobContainerItemProperties.isEncryptionScopeOverridePrevented());
}
static PathItem toPathItem(Path path) {
if (path == null) {
return null;
}
PathItem pathItem = new PathItem(path.getETag(),
parseDateOrNull(path.getLastModified()), path.getContentLength() == null ? 0 : path.getContentLength(),
path.getGroup(), path.isDirectory() != null && path.isDirectory(), path.getName(), path.getOwner(),
path.getPermissions(),
parseWindowsFileTimeOrDateString(path.getCreationTime()),
parseWindowsFileTimeOrDateString(path.getExpiryTime()));
return AccessorUtility.getPathItemAccessor().setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext());
}
private static OffsetDateTime parseDateOrNull(String date) {
return date == null ? null : OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME);
}
/**
 * Converts a Windows FILETIME value to an {@code OffsetDateTime} in UTC.
 * FILETIME counts 100-nanosecond intervals since the Windows epoch (1601-01-01).
 *
 * @param fileTime the FILETIME value; 0 is treated as "not set".
 * @return the UTC timestamp, or null when {@code fileTime} is 0.
 */
private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) {
    if (fileTime == 0) {
        return null;
    }
    // 100-ns ticks -> milliseconds (10,000 ticks per millisecond).
    long fileTimeMs = fileTime / 10000;
    // Shift from the Windows epoch (1601-01-01) to the Unix epoch (1970-01-01).
    long fileTimeUnixEpoch = fileTimeMs - EPOCH_CONVERSION;
    return Instant.ofEpochMilli(fileTimeUnixEpoch).atOffset(ZoneOffset.UTC);
}
/**
 * Converts DataLake request conditions into the equivalent blob request conditions.
 *
 * @param requestConditions the DataLake conditions, may be null.
 * @return the equivalent blob conditions, or null when the input is null.
 */
static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) {
    if (requestConditions == null) {
        return null;
    }
    BlobRequestConditions blobConditions = new BlobRequestConditions();
    blobConditions.setIfMatch(requestConditions.getIfMatch());
    blobConditions.setIfNoneMatch(requestConditions.getIfNoneMatch());
    blobConditions.setIfModifiedSince(requestConditions.getIfModifiedSince());
    blobConditions.setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
    blobConditions.setLeaseId(requestConditions.getLeaseId());
    return blobConditions;
}
static FileReadResponse toFileReadResponse(BlobDownloadResponse r) {
if (r == null) {
return null;
}
return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(),
r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders())));
}
static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) {
if (r == null) {
return null;
}
return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
Transforms.toPathReadHeaders(r.getDeserializedHeaders(), r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT)));
}
/**
 * Converts blob download headers into DataLake {@code FileReadHeaders}, carrying the
 * encryption context through separately since it is not modeled on {@code BlobDownloadHeaders}.
 *
 * @param h the blob download headers, may be null.
 * @param encryptionContext the value of the x-ms-encryption-context response header, may be null.
 * @return the converted headers, or null when {@code h} is null.
 */
private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) {
    if (h == null) {
        return null;
    }
    return new FileReadHeaders()
        .setLastModified(h.getLastModified())
        .setMetadata(h.getMetadata())
        .setContentLength(h.getContentLength())
        .setContentType(h.getContentType())
        .setContentRange(h.getContentRange())
        .setETag(h.getETag())
        .setContentMd5(h.getContentMd5())
        .setContentEncoding(h.getContentEncoding())
        .setCacheControl(h.getCacheControl())
        .setContentDisposition(h.getContentDisposition())
        .setContentLanguage(h.getContentLanguage())
        .setCopyCompletionTime(h.getCopyCompletionTime())
        .setCopyStatusDescription(h.getCopyStatusDescription())
        .setCopyId(h.getCopyId())
        .setCopyProgress(h.getCopyProgress())
        .setCopySource(h.getCopySource())
        // Lease/copy enums are renamed between the blob and DataLake models; convert each.
        .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus()))
        .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration()))
        .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState()))
        .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
        .setClientRequestId(h.getClientRequestId())
        .setRequestId(h.getRequestId())
        .setVersion(h.getVersion())
        .setAcceptRanges(h.getAcceptRanges())
        .setDateProperty(h.getDateProperty())
        .setServerEncrypted(h.isServerEncrypted())
        .setEncryptionKeySha256(h.getEncryptionKeySha256())
        // The blob "blob content MD5" maps to the DataLake "file content MD5".
        .setFileContentMd5(h.getBlobContentMD5())
        .setContentCrc64(h.getContentCrc64())
        .setErrorCode(h.getErrorCode())
        .setCreationTime(h.getCreationTime())
        .setEncryptionContext(encryptionContext);
}
/**
 * Converts a list of DataLake signed identifiers into blob signed identifiers.
 *
 * @param identifiers the DataLake identifiers, may be null.
 * @return a mutable list of converted identifiers, or null when the input is null.
 */
static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) {
    if (identifiers == null) {
        return null;
    }
    // Presize to avoid resizes; consistent with toBlobQueryArrowSchema.
    List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(identifiers.size());
    for (DataLakeSignedIdentifier identifier : identifiers) {
        blobIdentifiers.add(Transforms.toBlobIdentifier(identifier));
    }
    return blobIdentifiers;
}
private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) {
if (identifier == null) {
return null;
}
return new BlobSignedIdentifier()
.setId(identifier.getId())
.setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy()));
}
private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) {
if (accessPolicy == null) {
return null;
}
return new BlobAccessPolicy()
.setExpiresOn(accessPolicy.getExpiresOn())
.setStartsOn(accessPolicy.getStartsOn())
.setPermissions(accessPolicy.getPermissions());
}
static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) {
if (accessPolicies == null) {
return null;
}
return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()),
Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers()));
}
/**
 * Converts a list of blob signed identifiers into DataLake signed identifiers.
 *
 * @param identifiers the blob identifiers, may be null.
 * @return a mutable list of converted identifiers, or null when the input is null.
 */
static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) {
    if (identifiers == null) {
        return null;
    }
    // Presize to avoid resizes; consistent with toBlobQueryArrowSchema.
    List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(identifiers.size());
    for (BlobSignedIdentifier identifier : identifiers) {
        dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier));
    }
    return dataLakeIdentifiers;
}
private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) {
if (identifier == null) {
return null;
}
return new DataLakeSignedIdentifier()
.setId(identifier.getId())
.setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy()));
}
private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) {
if (accessPolicy == null) {
return null;
}
return new DataLakeAccessPolicy()
.setExpiresOn(accessPolicy.getExpiresOn())
.setStartsOn(accessPolicy.getStartsOn())
.setPermissions(accessPolicy.getPermissions());
}
static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) {
if (ser == null) {
return null;
}
if (ser instanceof FileQueryJsonSerialization) {
FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser;
return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator());
} else if (ser instanceof FileQueryDelimitedSerialization) {
FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser;
return new BlobQueryDelimitedSerialization()
.setColumnSeparator(delSer.getColumnSeparator())
.setEscapeChar(delSer.getEscapeChar())
.setFieldQuote(delSer.getFieldQuote())
.setHeadersPresent(delSer.isHeadersPresent())
.setRecordSeparator(delSer.getRecordSeparator());
} else if (ser instanceof FileQueryArrowSerialization) {
FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser;
return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema()));
} else if (ser instanceof FileQueryParquetSerialization) {
return new BlobQueryParquetSerialization();
} else {
throw new IllegalArgumentException(SERIALIZATION_MESSAGE);
}
}
/**
 * Converts a DataLake arrow schema into the blob-side arrow schema.
 *
 * @param schema the DataLake schema fields, may be null.
 * @return a mutable list of converted fields, or null when the input is null.
 */
private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) {
    if (schema == null) {
        return null;
    }
    return schema.stream()
        .map(Transforms::toBlobQueryArrowField)
        .collect(Collectors.toList());
}
private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) {
if (field == null) {
return null;
}
return new BlobQueryArrowField(BlobQueryArrowFieldType.fromString(field.getType().toString()))
.setName(field.getName())
.setPrecision(field.getPrecision())
.setScale(field.getScale());
}
/**
 * Adapts a DataLake {@code FileQueryError} consumer so it can receive blob-side
 * {@code BlobQueryError} callbacks.
 *
 * @param er the DataLake error consumer, may be null.
 * @return an adapting consumer, or null when no consumer was supplied.
 */
static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) {
    if (er == null) {
        return null;
    }
    return blobError -> er.accept(toFileQueryError(blobError));
}
static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) {
if (pr == null) {
return null;
}
return progress -> pr.accept(toFileQueryProgress(progress));
}
/**
 * Converts a blob query error into the DataLake {@code FileQueryError} model.
 *
 * @param error the blob error, may be null.
 * @return the converted error, or null when the input is null.
 */
private static FileQueryError toFileQueryError(BlobQueryError error) {
    return error == null
        ? null
        : new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition());
}
private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) {
if (progress == null) {
return null;
}
return new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes());
}
static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) {
if (r == null) {
return null;
}
return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(new BlobQueryAsyncResponse(r.getRequest(),
r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders())));
}
static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) {
if (r == null) {
return null;
}
return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
Transforms.toFileQueryHeaders(r.getDeserializedHeaders()));
}
private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) {
if (h == null) {
return null;
}
return new FileQueryHeaders()
.setLastModified(h.getLastModified())
.setMetadata(h.getMetadata())
.setContentLength(h.getContentLength())
.setContentType(h.getContentType())
.setContentRange(h.getContentRange())
.setETag(h.getETag())
.setContentMd5(h.getContentMd5())
.setContentEncoding(h.getContentEncoding())
.setCacheControl(h.getCacheControl())
.setContentDisposition(h.getContentDisposition())
.setContentLanguage(h.getContentLanguage())
.setCopyCompletionTime(h.getCopyCompletionTime())
.setCopyStatusDescription(h.getCopyStatusDescription())
.setCopyId(h.getCopyId())
.setCopyProgress(h.getCopyProgress())
.setCopySource(h.getCopySource())
.setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus()))
.setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration()))
.setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState()))
.setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
.setClientRequestId(h.getClientRequestId())
.setRequestId(h.getRequestId())
.setVersion(h.getVersion())
.setAcceptRanges(h.getAcceptRanges())
.setDateProperty(h.getDateProperty())
.setServerEncrypted(h.isServerEncrypted())
.setEncryptionKeySha256(h.getEncryptionKeySha256())
.setFileContentMd5(h.getContentMd5())
.setContentCrc64(h.getContentCrc64())
.setErrorCode(h.getErrorCode());
}
/**
 * Converts DataLake {@code FileQueryOptions} into blob {@code BlobQueryOptions}.
 *
 * @param options the DataLake query options, may be null.
 * @return the equivalent blob options, or null when the input is null.
 */
static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) {
    if (options == null) {
        return null;
    }
    // Only the constructor differs by whether an output stream was supplied; apply the shared
    // configuration once instead of duplicating the whole setter chain in both branches.
    BlobQueryOptions blobQueryOptions = options.getOutputStream() == null
        ? new BlobQueryOptions(options.getExpression())
        : new BlobQueryOptions(options.getExpression(), options.getOutputStream());
    return blobQueryOptions
        .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization()))
        .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization()))
        .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions()))
        .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer()))
        .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer()));
}
/**
 * Converts DataLake file-system undelete options into blob container undelete options.
 *
 * @param options the DataLake undelete options, may be null.
 * @return the equivalent blob options, or null when the input is null.
 */
static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) {
    if (options == null) {
        return null;
    }
    UndeleteBlobContainerOptions undeleteOptions = new UndeleteBlobContainerOptions(
        options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion());
    undeleteOptions.setDestinationContainerName(options.getDestinationFileSystemName());
    return undeleteOptions;
}
/**
 * Converts blob {@code BlobServiceProperties} into DataLake {@code DataLakeServiceProperties}.
 *
 * @param blobProps the blob service properties, may be null.
 * @return the equivalent DataLake properties, or null when the input is null.
 */
static DataLakeServiceProperties toDataLakeServiceProperties(BlobServiceProperties blobProps) {
    if (blobProps == null) {
        return null;
    }
    return new DataLakeServiceProperties()
        .setDefaultServiceVersion(blobProps.getDefaultServiceVersion())
        // Guard against a null CORS list; calling .stream() on it would throw NPE.
        .setCors(blobProps.getCors() == null ? null
            : blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList()))
        .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy()))
        .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics()))
        .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics()))
        .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging()))
        .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite()));
}
static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) {
if (staticWebsite == null) {
return null;
}
return new DataLakeStaticWebsite()
.setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath())
.setEnabled(staticWebsite.isEnabled())
.setErrorDocument404Path(staticWebsite.getErrorDocument404Path())
.setIndexDocument(staticWebsite.getIndexDocument());
}
static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) {
if (blobLogging == null) {
return null;
}
return new DataLakeAnalyticsLogging()
.setDelete(blobLogging.isDelete())
.setRead(blobLogging.isRead())
.setWrite(blobLogging.isWrite())
.setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy()))
.setVersion(blobLogging.getVersion());
}
static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) {
if (blobRule == null) {
return null;
}
return new DataLakeCorsRule()
.setAllowedHeaders(blobRule.getAllowedHeaders())
.setAllowedMethods(blobRule.getAllowedMethods())
.setAllowedOrigins(blobRule.getAllowedOrigins())
.setExposedHeaders(blobRule.getExposedHeaders())
.setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds());
}
static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) {
if (blobMetrics == null) {
return null;
}
return new DataLakeMetrics()
.setEnabled(blobMetrics.isEnabled())
.setIncludeApis(blobMetrics.isIncludeApis())
.setVersion(blobMetrics.getVersion())
.setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy()));
}
/**
 * Converts a blob retention policy into the DataLake retention policy model.
 *
 * @param blobPolicy the blob policy, may be null.
 * @return the equivalent DataLake policy, or null when the input is null.
 */
static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) {
    if (blobPolicy == null) {
        return null;
    }
    DataLakeRetentionPolicy policy = new DataLakeRetentionPolicy();
    policy.setEnabled(blobPolicy.isEnabled());
    policy.setDays(blobPolicy.getDays());
    return policy;
}
/**
 * Converts DataLake {@code DataLakeServiceProperties} into blob {@code BlobServiceProperties}.
 *
 * @param datalakeProperties the DataLake service properties, may be null.
 * @return the equivalent blob properties, or null when the input is null.
 */
static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) {
    if (datalakeProperties == null) {
        return null;
    }
    return new BlobServiceProperties()
        .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion())
        // Guard against a null CORS list; calling .stream() on it would throw NPE.
        .setCors(datalakeProperties.getCors() == null ? null
            : datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList()))
        .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy()))
        .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics()))
        .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics()))
        .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging()))
        .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite()));
}
static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) {
if (staticWebsite == null) {
return null;
}
return new StaticWebsite()
.setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath())
.setEnabled(staticWebsite.isEnabled())
.setErrorDocument404Path(staticWebsite.getErrorDocument404Path())
.setIndexDocument(staticWebsite.getIndexDocument());
}
static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) {
if (datalakeLogging == null) {
return null;
}
return new BlobAnalyticsLogging()
.setDelete(datalakeLogging.isDelete())
.setRead(datalakeLogging.isRead())
.setWrite(datalakeLogging.isWrite())
.setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy()))
.setVersion(datalakeLogging.getVersion());
}
static BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) {
if (datalakeRule == null) {
return null;
}
return new BlobCorsRule()
.setAllowedHeaders(datalakeRule.getAllowedHeaders())
.setAllowedMethods(datalakeRule.getAllowedMethods())
.setAllowedOrigins(datalakeRule.getAllowedOrigins())
.setExposedHeaders(datalakeRule.getExposedHeaders())
.setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds());
}
static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) {
if (datalakeMetrics == null) {
return null;
}
return new BlobMetrics()
.setEnabled(datalakeMetrics.isEnabled())
.setIncludeApis(datalakeMetrics.isIncludeApis())
.setVersion(datalakeMetrics.getVersion())
.setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy()));
}
static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) {
if (datalakePolicy == null) {
return null;
}
return new BlobRetentionPolicy()
.setDays(datalakePolicy.getDays())
.setEnabled(datalakePolicy.isEnabled());
}
/**
 * Converts a soft-deleted blob item into a {@code PathDeletedItem} (isPrefix = false).
 *
 * @param blobItem the internal blob listing item, may be null.
 * @return the converted item, or null when the input is null.
 */
static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) {
    if (blobItem == null) {
        return null;
    }
    // NOTE(review): assumes getProperties() is always non-null for a deleted-blob listing entry;
    // a missing properties payload would throw NPE here -- confirm against the listing response.
    return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(),
        blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays());
}
/**
 * Converts a blob prefix (virtual directory) into a {@code PathDeletedItem} (isPrefix = true).
 *
 * @param blobPrefix the blob prefix, may be null.
 * @return the converted item, or null when the input is null.
 */
static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) {
    // Null check added for consistency with every other transform in this class.
    if (blobPrefix == null) {
        return null;
    }
    return new PathDeletedItem(blobPrefix.getName(), true, null, null, null);
}
static CustomerProvidedKey toBlobCustomerProvidedKey(
com.azure.storage.file.datalake.models.CustomerProvidedKey key) {
if (key == null) {
return null;
}
return new CustomerProvidedKey(key.getKey());
}
/**
 * Converts blob customer-provided-key info into the DataLake {@code CpkInfo} model.
 *
 * @param info the blob CPK info, may be null.
 * @return the equivalent DataLake CPK info, or null when the input is null.
 */
static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo info) {
    if (info == null) {
        return null;
    }
    return new CpkInfo()
        .setEncryptionKey(info.getEncryptionKey())
        // Guard against a null algorithm; calling toString() on it would throw NPE.
        .setEncryptionAlgorithm(info.getEncryptionAlgorithm() == null ? null
            : com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString(
                info.getEncryptionAlgorithm().toString()))
        .setEncryptionKeySha256(info.getEncryptionKeySha256());
}
static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) {
if (fileSystemEncryptionScope == null) {
return null;
}
return new BlobContainerEncryptionScope()
.setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope())
.setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented());
}
static BlockBlobOutputStreamOptions toBlockBlobOutputStreamOptions(DataLakeFileOutputStreamOptions options) {
if (options == null) {
return null;
}
return new BlockBlobOutputStreamOptions()
.setParallelTransferOptions(options.getParallelTransferOptions())
.setHeaders(toBlobHttpHeaders(options.getHeaders()))
.setMetadata(options.getMetadata())
.setTags(options.getTags())
.setTier(options.getAccessTier())
.setRequestConditions(toBlobRequestConditions(options.getRequestConditions()));
}
} | class Transforms {
private static final ClientLogger LOGGER = new ClientLogger(Transforms.class);
private static final String SERIALIZATION_MESSAGE = String.format("'serialization' must be one of %s, %s, %s or "
+ "%s.", FileQueryJsonSerialization.class.getSimpleName(),
FileQueryDelimitedSerialization.class.getSimpleName(), FileQueryArrowSerialization.class.getSimpleName(),
FileQueryParquetSerialization.class.getSimpleName());
private static final long EPOCH_CONVERSION;
public static final HttpHeaderName X_MS_ENCRYPTION_CONTEXT = HttpHeaderName.fromString("x-ms-encryption-context");
public static final HttpHeaderName X_MS_OWNER = HttpHeaderName.fromString("x-ms-owner");
public static final HttpHeaderName X_MS_GROUP = HttpHeaderName.fromString("x-ms-group");
public static final HttpHeaderName X_MS_PERMISSIONS = HttpHeaderName.fromString("x-ms-permissions");
public static final HttpHeaderName X_MS_CONTINUATION = HttpHeaderName.fromString("x-ms-continuation");
// Computes EPOCH_CONVERSION: the offset in milliseconds between the Unix epoch (1970-01-01)
// and the Windows FILETIME epoch (1601-01-01). Used by fromWindowsFileTimeOrNull to shift
// Windows file times onto the Unix timeline. Both calendars are cleared first so only the
// explicitly set fields contribute.
static {
    GregorianCalendar unixEpoch = new GregorianCalendar();
    unixEpoch.clear();
    unixEpoch.set(1970, Calendar.JANUARY, 1, 0, 0, 0);
    GregorianCalendar windowsEpoch = new GregorianCalendar();
    windowsEpoch.clear();
    windowsEpoch.set(1601, Calendar.JANUARY, 1, 0, 0, 0);
    EPOCH_CONVERSION = unixEpoch.getTimeInMillis() - windowsEpoch.getTimeInMillis();
}
/**
 * Converts the DataLake public-access type into the blob-side enum via its string form.
 *
 * @param fileSystemPublicAccessType the DataLake access type, may be null.
 * @return the blob access type, or null when the input is null.
 */
static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType
    fileSystemPublicAccessType) {
    return fileSystemPublicAccessType == null
        ? null
        : com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString());
}
private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType
blobLeaseDurationType) {
if (blobLeaseDurationType == null) {
return null;
}
return LeaseDurationType.fromString(blobLeaseDurationType.toString());
}
private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType
blobLeaseStateType) {
if (blobLeaseStateType == null) {
return null;
}
return LeaseStateType.fromString(blobLeaseStateType.toString());
}
private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType
blobLeaseStatusType) {
if (blobLeaseStatusType == null) {
return null;
}
return LeaseStatusType.fromString(blobLeaseStatusType.toString());
}
private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType
blobPublicAccessType) {
if (blobPublicAccessType == null) {
return null;
}
return PublicAccessType.fromString(blobPublicAccessType.toString());
}
private static CopyStatusType toDataLakeCopyStatusType(
com.azure.storage.blob.models.CopyStatusType blobCopyStatus) {
if (blobCopyStatus == null) {
return null;
}
return CopyStatusType.fromString(blobCopyStatus.toString());
}
private static ArchiveStatus toDataLakeArchiveStatus(
com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) {
if (blobArchiveStatus == null) {
return null;
}
return ArchiveStatus.fromString(blobArchiveStatus.toString());
}
private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) {
if (blobAccessTier == null) {
return null;
}
return AccessTier.fromString(blobAccessTier.toString());
}
/**
 * Converts blob container properties into DataLake {@code FileSystemProperties}, attaching the
 * encryption-scope values through the package-private accessor since they are not part of the
 * public constructor.
 *
 * @param blobContainerProperties the blob container properties, may be null.
 * @return the converted properties, or null when the input is null.
 */
static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) {
    if (blobContainerProperties == null) {
        return null;
    }
    FileSystemProperties fileSystemProperties = new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(),
        blobContainerProperties.getLastModified(),
        Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()),
        Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()),
        Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()),
        Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()),
        blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold());
    return AccessorUtility.getFileSystemPropertiesAccessor()
        .setFileSystemProperties(fileSystemProperties, blobContainerProperties.getDefaultEncryptionScope(),
            blobContainerProperties.isEncryptionScopeOverridePrevented());
}
private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) {
if (fileSystemListDetails == null) {
return null;
}
return new BlobContainerListDetails()
.setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata())
.setRetrieveDeleted(fileSystemListDetails.getRetrieveDeleted())
.setRetrieveSystemContainers(fileSystemListDetails.getRetrieveSystemFileSystems());
}
static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) {
if (listFileSystemsOptions == null) {
return null;
}
return new ListBlobContainersOptions()
.setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails()))
.setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage())
.setPrefix(listFileSystemsOptions.getPrefix());
}
/**
 * Converts a blob user delegation key into its DataLake counterpart.
 *
 * @param blobUserDelegationKey the blob delegation key, may be null.
 * @return the equivalent DataLake key, or null when the input is null.
 */
static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey
    blobUserDelegationKey) {
    if (blobUserDelegationKey == null) {
        return null;
    }
    UserDelegationKey delegationKey = new UserDelegationKey();
    delegationKey.setSignedObjectId(blobUserDelegationKey.getSignedObjectId());
    delegationKey.setSignedTenantId(blobUserDelegationKey.getSignedTenantId());
    delegationKey.setSignedService(blobUserDelegationKey.getSignedService());
    delegationKey.setSignedVersion(blobUserDelegationKey.getSignedVersion());
    delegationKey.setSignedStart(blobUserDelegationKey.getSignedStart());
    delegationKey.setSignedExpiry(blobUserDelegationKey.getSignedExpiry());
    delegationKey.setValue(blobUserDelegationKey.getValue());
    return delegationKey;
}
static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) {
if (pathHTTPHeaders == null) {
return null;
}
return new BlobHttpHeaders()
.setCacheControl(pathHTTPHeaders.getCacheControl())
.setContentDisposition(pathHTTPHeaders.getContentDisposition())
.setContentEncoding(pathHTTPHeaders.getContentEncoding())
.setContentLanguage(pathHTTPHeaders.getContentLanguage())
.setContentType(pathHTTPHeaders.getContentType())
.setContentMd5(pathHTTPHeaders.getContentMd5());
}
static BlobInputStreamOptions toBlobInputStreamOptions(DataLakeFileInputStreamOptions options) {
if (options == null) {
return null;
}
return new BlobInputStreamOptions()
.setBlockSize(options.getBlockSize())
.setRange(toBlobRange(options.getRange()))
.setRequestConditions(toBlobRequestConditions(options.getRequestConditions()))
.setConsistentReadControl(toBlobConsistentReadControl(options.getConsistentReadControl()));
}
static com.azure.storage.blob.models.ConsistentReadControl toBlobConsistentReadControl(
com.azure.storage.file.datalake.models.ConsistentReadControl datalakeConsistentReadControl) {
if (datalakeConsistentReadControl == null) {
return null;
}
switch (datalakeConsistentReadControl) {
case NONE:
return ConsistentReadControl.NONE;
case ETAG:
return ConsistentReadControl.ETAG;
default:
throw new IllegalArgumentException("Could not convert ConsistentReadControl");
}
}
static BlobRange toBlobRange(FileRange fileRange) {
if (fileRange == null) {
return null;
}
return new BlobRange(fileRange.getOffset(), fileRange.getCount());
}
static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions(
DownloadRetryOptions dataLakeOptions) {
if (dataLakeOptions == null) {
return null;
}
return new com.azure.storage.blob.models.DownloadRetryOptions()
.setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests());
}
static PathProperties toPathProperties(BlobProperties properties) {
return toPathProperties(properties, null);
}
static PathProperties toPathProperties(BlobProperties properties, Response<?> r) {
if (properties == null) {
return null;
} else {
PathProperties pathProperties = new PathProperties(properties.getCreationTime(), properties.getLastModified(),
properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(),
properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(),
properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()),
Transforms.toDataLakeLeaseStateType(properties.getLeaseState()),
Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(),
Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(),
properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(),
properties.isServerEncrypted(), properties.isIncrementalCopy(),
Transforms.toDataLakeAccessTier(properties.getAccessTier()),
Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(),
properties.getAccessTierChangeTime(), properties.getMetadata(), properties.getExpiresOn());
if (r == null) {
return pathProperties;
} else {
String encryptionContext = r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT);
String owner = r.getHeaders().getValue(X_MS_OWNER);
String group = r.getHeaders().getValue(X_MS_GROUP);
String permissions = r.getHeaders().getValue(X_MS_PERMISSIONS);
return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties,
properties.getEncryptionScope(), encryptionContext, owner, group, permissions);
}
}
}
static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) {
if (blobContainerItem == null) {
return null;
}
return new FileSystemItem()
.setName(blobContainerItem.getName())
.setDeleted(blobContainerItem.isDeleted())
.setVersion(blobContainerItem.getVersion())
.setMetadata(blobContainerItem.getMetadata())
.setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties()));
}
/**
 * Maps blob container item properties onto the Data Lake file-system item properties model.
 * Lease enums are translated through the dedicated Transforms helpers; {@code null} passes through.
 */
private static FileSystemItemProperties toFileSystemItemProperties(
    BlobContainerItemProperties blobContainerItemProperties) {
    if (blobContainerItemProperties == null) {
        return null;
    }
    FileSystemItemProperties properties = new FileSystemItemProperties();
    properties.setETag(blobContainerItemProperties.getETag());
    properties.setLastModified(blobContainerItemProperties.getLastModified());
    properties.setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus()));
    properties.setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState()));
    properties.setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration()));
    properties.setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess()));
    properties.setHasLegalHold(blobContainerItemProperties.isHasLegalHold());
    properties.setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy());
    properties.setEncryptionScope(blobContainerItemProperties.getDefaultEncryptionScope());
    properties.setEncryptionScopeOverridePrevented(
        blobContainerItemProperties.isEncryptionScopeOverridePrevented());
    return properties;
}
/**
 * Converts a generated {@code Path} model into the public {@code PathItem}, then attaches the
 * encryption scope/context through the accessor so those fields stay package-private on PathItem.
 * Returns {@code null} for {@code null} input.
 */
static PathItem toPathItem(Path path) {
    if (path == null) {
        return null;
    }
    // Missing content length means a zero-length path; a missing directory flag means "file".
    long contentLength = path.getContentLength() == null ? 0 : path.getContentLength();
    boolean isDirectory = path.isDirectory() != null && path.isDirectory();
    PathItem pathItem = new PathItem(path.getETag(), parseDateOrNull(path.getLastModified()), contentLength,
        path.getGroup(), isDirectory, path.getName(), path.getOwner(), path.getPermissions(),
        parseWindowsFileTimeOrDateString(path.getCreationTime()),
        parseWindowsFileTimeOrDateString(path.getExpiryTime()));
    return AccessorUtility.getPathItemAccessor()
        .setPathItemProperties(pathItem, path.getEncryptionScope(), path.getEncryptionContext());
}
/**
 * Parses an RFC 1123 date string into an {@link OffsetDateTime}.
 *
 * @param date the date string, may be {@code null} in which case {@code null} is returned.
 * @throws RuntimeException (logged) if the string is present but not a valid RFC 1123 date.
 */
private static OffsetDateTime parseDateOrNull(String date) {
    if (date == null) {
        return null;
    }
    try {
        return OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME);
    } catch (Exception ex) {
        throw LOGGER.logExceptionAsError(new RuntimeException("Failed to parse date string: " + date, ex));
    }
}
/**
 * Converts a Windows file time (100-ns ticks since 1601-01-01) into a UTC {@link OffsetDateTime}.
 * A value of {@code 0} means "not set" and yields {@code null}.
 */
private static OffsetDateTime fromWindowsFileTimeOrNull(long fileTime) {
    if (fileTime == 0) {
        return null;
    }
    // Ticks -> milliseconds, then shift from the 1601 epoch to the Unix epoch.
    long unixEpochMillis = (fileTime / 10000) - EPOCH_CONVERSION;
    return Instant.ofEpochMilli(unixEpochMillis).atOffset(ZoneOffset.UTC);
}
/** Translates Data Lake request conditions into the blob-package equivalent; {@code null} passes through. */
static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) {
    return requestConditions == null
        ? null
        : new BlobRequestConditions()
            .setLeaseId(requestConditions.getLeaseId())
            .setIfMatch(requestConditions.getIfMatch())
            .setIfNoneMatch(requestConditions.getIfNoneMatch())
            .setIfModifiedSince(requestConditions.getIfModifiedSince())
            .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince());
}
/** Wraps a sync blob download response as a Data Lake {@code FileReadResponse}; {@code null} passes through. */
static FileReadResponse toFileReadResponse(BlobDownloadResponse r) {
    if (r == null) {
        return null;
    }
    // Re-wrap as an async response first (body intentionally null for the sync variant), then convert.
    BlobDownloadAsyncResponse asyncResponse = new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(),
        r.getHeaders(), null, r.getDeserializedHeaders());
    return new FileReadResponse(Transforms.toFileReadAsyncResponse(asyncResponse));
}
/** Converts an async blob download response to the Data Lake read response; {@code null} passes through. */
static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) {
    if (r == null) {
        return null;
    }
    // The encryption context only exists as a raw header; pull it out so the headers model can carry it.
    String encryptionContext = r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT);
    return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
        Transforms.toPathReadHeaders(r.getDeserializedHeaders(), encryptionContext));
}
/**
 * Copies the deserialized blob download headers into the Data Lake {@code FileReadHeaders} model,
 * translating the lease/copy-status enums, and attaches the separately-extracted encryption context.
 * Returns {@code null} when the header model is {@code null}.
 */
private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) {
    if (h == null) {
        return null;
    }
    return new FileReadHeaders()
        .setLastModified(h.getLastModified())
        .setMetadata(h.getMetadata())
        .setContentLength(h.getContentLength())
        .setContentType(h.getContentType())
        .setContentRange(h.getContentRange())
        .setETag(h.getETag())
        .setContentMd5(h.getContentMd5())
        .setContentEncoding(h.getContentEncoding())
        .setCacheControl(h.getCacheControl())
        .setContentDisposition(h.getContentDisposition())
        .setContentLanguage(h.getContentLanguage())
        .setCopyCompletionTime(h.getCopyCompletionTime())
        .setCopyStatusDescription(h.getCopyStatusDescription())
        .setCopyId(h.getCopyId())
        .setCopyProgress(h.getCopyProgress())
        .setCopySource(h.getCopySource())
        .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus()))
        .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration()))
        .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState()))
        .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
        .setClientRequestId(h.getClientRequestId())
        .setRequestId(h.getRequestId())
        .setVersion(h.getVersion())
        .setAcceptRanges(h.getAcceptRanges())
        .setDateProperty(h.getDateProperty())
        .setServerEncrypted(h.isServerEncrypted())
        .setEncryptionKeySha256(h.getEncryptionKeySha256())
        // The file-level MD5 comes from the blob-specific x-ms-blob-content-md5, not Content-MD5 above.
        .setFileContentMd5(h.getBlobContentMD5())
        .setContentCrc64(h.getContentCrc64())
        .setErrorCode(h.getErrorCode())
        .setCreationTime(h.getCreationTime())
        .setEncryptionContext(encryptionContext);
}
/** Converts each Data Lake signed identifier into its blob-package equivalent; {@code null} passes through. */
static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) {
    if (identifiers == null) {
        return null;
    }
    return identifiers.stream().map(Transforms::toBlobIdentifier).collect(Collectors.toList());
}
/** Maps one Data Lake signed identifier to the blob model; {@code null} passes through. */
private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) {
    return identifier == null
        ? null
        : new BlobSignedIdentifier()
            .setId(identifier.getId())
            .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy()));
}
/** Maps a Data Lake access policy to the blob model; {@code null} passes through. */
private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) {
    return accessPolicy == null
        ? null
        : new BlobAccessPolicy()
            .setStartsOn(accessPolicy.getStartsOn())
            .setExpiresOn(accessPolicy.getExpiresOn())
            .setPermissions(accessPolicy.getPermissions());
}
/** Converts blob container access policies into the Data Lake model; {@code null} passes through. */
static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) {
    return accessPolicies == null
        ? null
        : new FileSystemAccessPolicies(
            Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()),
            Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers()));
}
/** Converts each blob signed identifier into the Data Lake equivalent; {@code null} passes through. */
static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) {
    if (identifiers == null) {
        return null;
    }
    return identifiers.stream().map(Transforms::toDataLakeIdentifier).collect(Collectors.toList());
}
/** Maps one blob signed identifier to the Data Lake model; {@code null} passes through. */
private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) {
    return identifier == null
        ? null
        : new DataLakeSignedIdentifier()
            .setId(identifier.getId())
            .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy()));
}
/** Maps a blob access policy to the Data Lake model; {@code null} passes through. */
private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) {
    return accessPolicy == null
        ? null
        : new DataLakeAccessPolicy()
            .setStartsOn(accessPolicy.getStartsOn())
            .setExpiresOn(accessPolicy.getExpiresOn())
            .setPermissions(accessPolicy.getPermissions());
}
/**
 * Dispatches on the concrete {@code FileQuerySerialization} subtype and builds the matching blob
 * query serialization. Returns {@code null} for {@code null} input.
 *
 * @throws IllegalArgumentException if the subtype is not one of JSON/delimited/Arrow/Parquet.
 */
static BlobQuerySerialization toBlobQuerySerialization(FileQuerySerialization ser) {
    if (ser == null) {
        return null;
    }
    if (ser instanceof FileQueryJsonSerialization) {
        // JSON: only the record separator is configurable.
        FileQueryJsonSerialization jsonSer = (FileQueryJsonSerialization) ser;
        return new BlobQueryJsonSerialization().setRecordSeparator(jsonSer.getRecordSeparator());
    } else if (ser instanceof FileQueryDelimitedSerialization) {
        // Delimited (CSV-like): copy every delimiter/quote/header option.
        FileQueryDelimitedSerialization delSer = (FileQueryDelimitedSerialization) ser;
        return new BlobQueryDelimitedSerialization()
            .setColumnSeparator(delSer.getColumnSeparator())
            .setEscapeChar(delSer.getEscapeChar())
            .setFieldQuote(delSer.getFieldQuote())
            .setHeadersPresent(delSer.isHeadersPresent())
            .setRecordSeparator(delSer.getRecordSeparator());
    } else if (ser instanceof FileQueryArrowSerialization) {
        // Arrow: only the schema carries over.
        FileQueryArrowSerialization arrSer = (FileQueryArrowSerialization) ser;
        return new BlobQueryArrowSerialization().setSchema(toBlobQueryArrowSchema(arrSer.getSchema()));
    } else if (ser instanceof FileQueryParquetSerialization) {
        // Parquet: no options to copy.
        return new BlobQueryParquetSerialization();
    } else {
        throw new IllegalArgumentException(SERIALIZATION_MESSAGE);
    }
}
/** Converts each Arrow schema field into the blob-package model; {@code null} passes through. */
private static List<BlobQueryArrowField> toBlobQueryArrowSchema(List<FileQueryArrowField> schema) {
    if (schema == null) {
        return null;
    }
    return schema.stream().map(Transforms::toBlobQueryArrowField)
        .collect(Collectors.toCollection(() -> new ArrayList<>(schema.size())));
}
/** Maps one Arrow field description to the blob model; {@code null} passes through. */
private static BlobQueryArrowField toBlobQueryArrowField(FileQueryArrowField field) {
    if (field == null) {
        return null;
    }
    // NOTE(review): assumes field.getType() is non-null here — confirm upstream validation.
    BlobQueryArrowFieldType fieldType = BlobQueryArrowFieldType.fromString(field.getType().toString());
    return new BlobQueryArrowField(fieldType)
        .setName(field.getName())
        .setPrecision(field.getPrecision())
        .setScale(field.getScale());
}
/** Adapts a Data Lake error consumer so it can receive blob query errors; {@code null} passes through. */
static Consumer<BlobQueryError> toBlobQueryErrorConsumer(Consumer<FileQueryError> er) {
    if (er == null) {
        return null;
    }
    // Translate the blob error into the Data Lake model before invoking the caller's consumer.
    return blobError -> er.accept(toFileQueryError(blobError));
}
/** Adapts a Data Lake progress consumer so it can receive blob query progress; {@code null} passes through. */
static Consumer<BlobQueryProgress> toBlobQueryProgressConsumer(Consumer<FileQueryProgress> pr) {
    if (pr == null) {
        return null;
    }
    // Translate the blob progress into the Data Lake model before invoking the caller's consumer.
    return blobProgress -> pr.accept(toFileQueryProgress(blobProgress));
}
/** Maps a blob query error to the Data Lake model; {@code null} passes through. */
private static FileQueryError toFileQueryError(BlobQueryError error) {
    return error == null
        ? null
        : new FileQueryError(error.isFatal(), error.getName(), error.getDescription(), error.getPosition());
}
/** Maps a blob query progress report to the Data Lake model; {@code null} passes through. */
private static FileQueryProgress toFileQueryProgress(BlobQueryProgress progress) {
    return progress == null
        ? null
        : new FileQueryProgress(progress.getBytesScanned(), progress.getTotalBytes());
}
/** Wraps a sync blob query response as a Data Lake {@code FileQueryResponse}; {@code null} passes through. */
static FileQueryResponse toFileQueryResponse(BlobQueryResponse r) {
    if (r == null) {
        return null;
    }
    // Re-wrap as an async response first (body intentionally null for the sync variant), then convert.
    BlobQueryAsyncResponse asyncResponse = new BlobQueryAsyncResponse(r.getRequest(), r.getStatusCode(),
        r.getHeaders(), null, r.getDeserializedHeaders());
    return new FileQueryResponse(Transforms.toFileQueryAsyncResponse(asyncResponse));
}
/** Converts an async blob query response into the Data Lake equivalent; {@code null} passes through. */
static FileQueryAsyncResponse toFileQueryAsyncResponse(BlobQueryAsyncResponse r) {
    if (r == null) {
        return null;
    }
    FileQueryHeaders headers = Transforms.toFileQueryHeaders(r.getDeserializedHeaders());
    return new FileQueryAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), headers);
}
/**
 * Copies the deserialized blob query headers into the Data Lake {@code FileQueryHeaders} model,
 * translating the lease/copy-status enums. Returns {@code null} when the header model is {@code null}.
 */
private static FileQueryHeaders toFileQueryHeaders(BlobQueryHeaders h) {
    if (h == null) {
        return null;
    }
    return new FileQueryHeaders()
        .setLastModified(h.getLastModified())
        .setMetadata(h.getMetadata())
        .setContentLength(h.getContentLength())
        .setContentType(h.getContentType())
        .setContentRange(h.getContentRange())
        .setETag(h.getETag())
        .setContentMd5(h.getContentMd5())
        .setContentEncoding(h.getContentEncoding())
        .setCacheControl(h.getCacheControl())
        .setContentDisposition(h.getContentDisposition())
        .setContentLanguage(h.getContentLanguage())
        .setCopyCompletionTime(h.getCopyCompletionTime())
        .setCopyStatusDescription(h.getCopyStatusDescription())
        .setCopyId(h.getCopyId())
        .setCopyProgress(h.getCopyProgress())
        .setCopySource(h.getCopySource())
        .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus()))
        .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration()))
        .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState()))
        .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
        .setClientRequestId(h.getClientRequestId())
        .setRequestId(h.getRequestId())
        .setVersion(h.getVersion())
        .setAcceptRanges(h.getAcceptRanges())
        .setDateProperty(h.getDateProperty())
        .setServerEncrypted(h.isServerEncrypted())
        .setEncryptionKeySha256(h.getEncryptionKeySha256())
        // Fix: the file-level MD5 should come from the blob-specific x-ms-blob-content-md5 header
        // (as in toPathReadHeaders), not duplicate the Content-MD5 already mapped above.
        .setFileContentMd5(h.getBlobContentMD5())
        .setContentCrc64(h.getContentCrc64())
        .setErrorCode(h.getErrorCode());
}
/**
 * Converts Data Lake query options into blob query options. Only the constructor differs based on
 * whether an output stream was supplied; the remaining option mapping is shared (previously the
 * five-setter chain was duplicated in both branches).
 *
 * @param options the Data Lake options, may be {@code null} in which case {@code null} is returned.
 */
static BlobQueryOptions toBlobQueryOptions(FileQueryOptions options) {
    if (options == null) {
        return null;
    }
    BlobQueryOptions blobQueryOptions = options.getOutputStream() == null
        ? new BlobQueryOptions(options.getExpression())
        : new BlobQueryOptions(options.getExpression(), options.getOutputStream());
    return blobQueryOptions
        .setInputSerialization(Transforms.toBlobQuerySerialization(options.getInputSerialization()))
        .setOutputSerialization(Transforms.toBlobQuerySerialization(options.getOutputSerialization()))
        .setRequestConditions(Transforms.toBlobRequestConditions(options.getRequestConditions()))
        .setErrorConsumer(Transforms.toBlobQueryErrorConsumer(options.getErrorConsumer()))
        .setProgressConsumer(Transforms.toBlobQueryProgressConsumer(options.getProgressConsumer()));
}
/** Converts file-system undelete options into blob container undelete options; {@code null} passes through. */
static UndeleteBlobContainerOptions toBlobContainerUndeleteOptions(FileSystemUndeleteOptions options) {
    if (options == null) {
        return null;
    }
    UndeleteBlobContainerOptions blobOptions = new UndeleteBlobContainerOptions(
        options.getDeletedFileSystemName(), options.getDeletedFileSystemVersion());
    return blobOptions.setDestinationContainerName(options.getDestinationFileSystemName());
}
/**
 * Converts blob service properties into the Data Lake model.
 * Returns {@code null} for {@code null} input.
 */
static DataLakeServiceProperties toDataLakeServiceProperties(BlobServiceProperties blobProps) {
    if (blobProps == null) {
        return null;
    }
    // getCors() may be null when the service response omits CORS rules; guard to avoid an NPE
    // instead of unconditionally streaming it.
    List<DataLakeCorsRule> cors = blobProps.getCors() == null
        ? null
        : blobProps.getCors().stream().map(Transforms::toDataLakeCorsRule).collect(Collectors.toList());
    return new DataLakeServiceProperties()
        .setDefaultServiceVersion(blobProps.getDefaultServiceVersion())
        .setCors(cors)
        .setDeleteRetentionPolicy(toDataLakeRetentionPolicy(blobProps.getDeleteRetentionPolicy()))
        .setHourMetrics(toDataLakeMetrics(blobProps.getHourMetrics()))
        .setMinuteMetrics(toDataLakeMetrics(blobProps.getMinuteMetrics()))
        .setLogging(toDataLakeAnalyticsLogging(blobProps.getLogging()))
        .setStaticWebsite(toDataLakeStaticWebsite(blobProps.getStaticWebsite()));
}
/** Maps blob static-website settings to the Data Lake model; {@code null} passes through. */
static DataLakeStaticWebsite toDataLakeStaticWebsite(StaticWebsite staticWebsite) {
    return staticWebsite == null
        ? null
        : new DataLakeStaticWebsite()
            .setEnabled(staticWebsite.isEnabled())
            .setIndexDocument(staticWebsite.getIndexDocument())
            .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath())
            .setErrorDocument404Path(staticWebsite.getErrorDocument404Path());
}
/** Maps blob analytics logging settings to the Data Lake model; {@code null} passes through. */
static DataLakeAnalyticsLogging toDataLakeAnalyticsLogging(BlobAnalyticsLogging blobLogging) {
    return blobLogging == null
        ? null
        : new DataLakeAnalyticsLogging()
            .setVersion(blobLogging.getVersion())
            .setRead(blobLogging.isRead())
            .setWrite(blobLogging.isWrite())
            .setDelete(blobLogging.isDelete())
            .setRetentionPolicy(toDataLakeRetentionPolicy(blobLogging.getRetentionPolicy()));
}
/** Maps a blob CORS rule to the Data Lake model; {@code null} passes through. */
static DataLakeCorsRule toDataLakeCorsRule(BlobCorsRule blobRule) {
    return blobRule == null
        ? null
        : new DataLakeCorsRule()
            .setAllowedOrigins(blobRule.getAllowedOrigins())
            .setAllowedMethods(blobRule.getAllowedMethods())
            .setAllowedHeaders(blobRule.getAllowedHeaders())
            .setExposedHeaders(blobRule.getExposedHeaders())
            .setMaxAgeInSeconds(blobRule.getMaxAgeInSeconds());
}
/** Maps blob metrics settings to the Data Lake model; {@code null} passes through. */
static DataLakeMetrics toDataLakeMetrics(BlobMetrics blobMetrics) {
    return blobMetrics == null
        ? null
        : new DataLakeMetrics()
            .setVersion(blobMetrics.getVersion())
            .setEnabled(blobMetrics.isEnabled())
            .setIncludeApis(blobMetrics.isIncludeApis())
            .setRetentionPolicy(toDataLakeRetentionPolicy(blobMetrics.getRetentionPolicy()));
}
/** Maps a blob retention policy to the Data Lake model; {@code null} passes through. */
static DataLakeRetentionPolicy toDataLakeRetentionPolicy(BlobRetentionPolicy blobPolicy) {
    return blobPolicy == null
        ? null
        : new DataLakeRetentionPolicy()
            .setEnabled(blobPolicy.isEnabled())
            .setDays(blobPolicy.getDays());
}
/**
 * Converts Data Lake service properties into the blob model.
 * Returns {@code null} for {@code null} input.
 */
static BlobServiceProperties toBlobServiceProperties(DataLakeServiceProperties datalakeProperties) {
    if (datalakeProperties == null) {
        return null;
    }
    // getCors() may be null when the caller never set CORS rules; guard to avoid an NPE
    // instead of unconditionally streaming it.
    List<BlobCorsRule> cors = datalakeProperties.getCors() == null
        ? null
        : datalakeProperties.getCors().stream().map(Transforms::toBlobCorsRule).collect(Collectors.toList());
    return new BlobServiceProperties()
        .setDefaultServiceVersion(datalakeProperties.getDefaultServiceVersion())
        .setCors(cors)
        .setDeleteRetentionPolicy(toBlobRetentionPolicy(datalakeProperties.getDeleteRetentionPolicy()))
        .setHourMetrics(toBlobMetrics(datalakeProperties.getHourMetrics()))
        .setMinuteMetrics(toBlobMetrics(datalakeProperties.getMinuteMetrics()))
        .setLogging(toBlobAnalyticsLogging(datalakeProperties.getLogging()))
        .setStaticWebsite(toBlobStaticWebsite(datalakeProperties.getStaticWebsite()));
}
/** Maps Data Lake static-website settings to the blob model; {@code null} passes through. */
static StaticWebsite toBlobStaticWebsite(DataLakeStaticWebsite staticWebsite) {
    return staticWebsite == null
        ? null
        : new StaticWebsite()
            .setEnabled(staticWebsite.isEnabled())
            .setIndexDocument(staticWebsite.getIndexDocument())
            .setDefaultIndexDocumentPath(staticWebsite.getDefaultIndexDocumentPath())
            .setErrorDocument404Path(staticWebsite.getErrorDocument404Path());
}
/** Maps Data Lake analytics logging settings to the blob model; {@code null} passes through. */
static BlobAnalyticsLogging toBlobAnalyticsLogging(DataLakeAnalyticsLogging datalakeLogging) {
    return datalakeLogging == null
        ? null
        : new BlobAnalyticsLogging()
            .setVersion(datalakeLogging.getVersion())
            .setRead(datalakeLogging.isRead())
            .setWrite(datalakeLogging.isWrite())
            .setDelete(datalakeLogging.isDelete())
            .setRetentionPolicy(toBlobRetentionPolicy(datalakeLogging.getRetentionPolicy()));
}
/** Maps a Data Lake CORS rule to the blob model; {@code null} passes through. */
static BlobCorsRule toBlobCorsRule(DataLakeCorsRule datalakeRule) {
    return datalakeRule == null
        ? null
        : new BlobCorsRule()
            .setAllowedOrigins(datalakeRule.getAllowedOrigins())
            .setAllowedMethods(datalakeRule.getAllowedMethods())
            .setAllowedHeaders(datalakeRule.getAllowedHeaders())
            .setExposedHeaders(datalakeRule.getExposedHeaders())
            .setMaxAgeInSeconds(datalakeRule.getMaxAgeInSeconds());
}
/** Maps Data Lake metrics settings to the blob model; {@code null} passes through. */
static BlobMetrics toBlobMetrics(DataLakeMetrics datalakeMetrics) {
    return datalakeMetrics == null
        ? null
        : new BlobMetrics()
            .setVersion(datalakeMetrics.getVersion())
            .setEnabled(datalakeMetrics.isEnabled())
            .setIncludeApis(datalakeMetrics.isIncludeApis())
            .setRetentionPolicy(toBlobRetentionPolicy(datalakeMetrics.getRetentionPolicy()));
}
/** Maps a Data Lake retention policy to the blob model; {@code null} passes through. */
static BlobRetentionPolicy toBlobRetentionPolicy(DataLakeRetentionPolicy datalakePolicy) {
    return datalakePolicy == null
        ? null
        : new BlobRetentionPolicy()
            .setEnabled(datalakePolicy.isEnabled())
            .setDays(datalakePolicy.getDays());
}
/**
 * Converts a soft-deleted blob item into a Data Lake {@code PathDeletedItem}.
 * The {@code isPrefix} flag is always {@code false} for concrete blob items.
 * Returns {@code null} for {@code null} input.
 */
// NOTE(review): assumes blobItem.getProperties() is non-null for deleted items — confirm with the
// list-deleted-paths response contract.
static PathDeletedItem toPathDeletedItem(BlobItemInternal blobItem) {
    if (blobItem == null) {
        return null;
    }
    return new PathDeletedItem(blobItem.getName(), false, blobItem.getDeletionId(),
        blobItem.getProperties().getDeletedTime(), blobItem.getProperties().getRemainingRetentionDays());
}
/**
 * Converts a blob prefix (virtual directory) into a Data Lake {@code PathDeletedItem}.
 * Prefixes carry no deletion metadata, so those fields are {@code null}.
 * Added a null guard for consistency with every other converter in this class.
 */
static PathDeletedItem toPathDeletedItem(BlobPrefix blobPrefix) {
    if (blobPrefix == null) {
        return null;
    }
    return new PathDeletedItem(blobPrefix.getName(), true, null, null, null);
}
/** Re-wraps a Data Lake customer-provided key as the blob-package type; {@code null} passes through. */
static CustomerProvidedKey toBlobCustomerProvidedKey(
    com.azure.storage.file.datalake.models.CustomerProvidedKey key) {
    return key == null ? null : new CustomerProvidedKey(key.getKey());
}
/** Converts blob {@code CpkInfo} into the Data Lake model; {@code null} passes through. */
static CpkInfo fromBlobCpkInfo(com.azure.storage.blob.models.CpkInfo info) {
    if (info == null) {
        return null;
    }
    // NOTE(review): assumes info.getEncryptionAlgorithm() is non-null here — confirm with callers.
    com.azure.storage.file.datalake.models.EncryptionAlgorithmType algorithm =
        com.azure.storage.file.datalake.models.EncryptionAlgorithmType.fromString(
            info.getEncryptionAlgorithm().toString());
    return new CpkInfo()
        .setEncryptionKey(info.getEncryptionKey())
        .setEncryptionAlgorithm(algorithm)
        .setEncryptionKeySha256(info.getEncryptionKeySha256());
}
/** Maps file-system encryption-scope options onto the blob container model; {@code null} passes through. */
static BlobContainerEncryptionScope toBlobContainerEncryptionScope(FileSystemEncryptionScopeOptions fileSystemEncryptionScope) {
    return fileSystemEncryptionScope == null
        ? null
        : new BlobContainerEncryptionScope()
            .setDefaultEncryptionScope(fileSystemEncryptionScope.getDefaultEncryptionScope())
            .setEncryptionScopeOverridePrevented(fileSystemEncryptionScope.isEncryptionScopeOverridePrevented());
}
/** Maps Data Lake file output-stream options onto the block-blob equivalent; {@code null} passes through. */
static BlockBlobOutputStreamOptions toBlockBlobOutputStreamOptions(DataLakeFileOutputStreamOptions options) {
    return options == null
        ? null
        : new BlockBlobOutputStreamOptions()
            .setParallelTransferOptions(options.getParallelTransferOptions())
            .setHeaders(toBlobHttpHeaders(options.getHeaders()))
            .setMetadata(options.getMetadata())
            .setTags(options.getTags())
            .setTier(options.getAccessTier())
            .setRequestConditions(toBlobRequestConditions(options.getRequestConditions()));
}
} |
Just pushed | OpenTelemetryInjector injectOtelIntoJdbcDriver() {
System.out.println("AzureJdbcDriverAutoConfiguration.injectOtelIntoJdbcDriver");
return openTelemetry -> OpenTelemetryDriver.install(openTelemetry);
} | System.out.println("AzureJdbcDriverAutoConfiguration.injectOtelIntoJdbcDriver"); | OpenTelemetryInjector injectOtelIntoJdbcDriver() {
return openTelemetry -> OpenTelemetryDriver.install(openTelemetry);
} | class AzureJdbcDriverAutoConfiguration {
@Bean
@Bean
BeanFactoryPostProcessor openTelemetryBeanCreatedBeforeDatasourceBean() {
return configurableBeanFactory -> {
BeanDefinition dataSourceBean = configurableBeanFactory.getBeanDefinition("dataSource");
dataSourceBean.setDependsOn("openTelemetry");
};
}
} | class AzureJdbcDriverAutoConfiguration {
@Bean
@Bean
BeanFactoryPostProcessor openTelemetryBeanCreatedBeforeDatasourceBean() {
return configurableBeanFactory -> {
BeanDefinition dataSourceBean = configurableBeanFactory.getBeanDefinition("dataSource");
dataSourceBean.setDependsOn("openTelemetry");
};
}
} |
nit: ```suggestion return originalRequestBody.toReplayableBinaryDataAsync().flatMap(replayableBody -> { ``` super-minor, but it initially gave me an impression that it's really buffered even if replayable and there is no way to prevent buffering | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (retryStrategy.getMaxRetries() > 0 && originalRequestBody != null && !originalRequestBody.isReplayable()) {
return originalRequestBody.toReplayableBinaryDataAsync().flatMap(bufferedBody -> {
context.getHttpRequest().setBody(bufferedBody);
return attemptAsync(context, next, originalHttpRequest, 0, null);
});
}
return attemptAsync(context, next, originalHttpRequest, 0, null);
} | return originalRequestBody.toReplayableBinaryDataAsync().flatMap(bufferedBody -> { | public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (retryStrategy.getMaxRetries() > 0 && originalRequestBody != null && !originalRequestBody.isReplayable()) {
return originalRequestBody.toReplayableBinaryDataAsync().flatMap(replayableBody -> {
context.getHttpRequest().setBody(replayableBody);
return attemptAsync(context, next, originalHttpRequest, 0, null);
});
}
return attemptAsync(context, next, originalHttpRequest, 0, null);
} | class RetryPolicy implements HttpPipelinePolicy {
private static final ClientLogger LOGGER = new ClientLogger(RetryPolicy.class);
private final RetryStrategy retryStrategy;
private final HttpHeaderName retryAfterHeader;
private final ChronoUnit retryAfterTimeUnit;
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
*/
public RetryPolicy() {
this(new ExponentialBackoff(), null, null);
}
/**
* Creates {@link RetryPolicy} using {@link ExponentialBackoff
* and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
* the retry delay when a recoverable HTTP error is returned.
*
* @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
* for the retry delay. If the value is null, {@link RetryStrategy
* and ignore the delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
*/
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
* Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} as
* {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for
* calculating retry delay.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
* delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
* delay provided in response header.
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
* {@code retryAfterHeader} is null.
* @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
* {@code retryAfterHeader} is not null.
*/
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
this.retryAfterHeader = HttpHeaderName.fromString(retryAfterHeader);
this.retryAfterTimeUnit = retryAfterTimeUnit;
if (!isNullOrEmpty(retryAfterHeader)) {
Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
}
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}.
*
* @param retryStrategy The {@link RetryStrategy} used for retries.
* @throws NullPointerException If {@code retryStrategy} is null.
*/
public RetryPolicy(RetryStrategy retryStrategy) {
this(retryStrategy, null, null);
}
/**
* Creates a {@link RetryPolicy} with the provided {@link RetryOptions}.
*
* @param retryOptions The {@link RetryOptions} used to configure this {@link RetryPolicy}.
* @throws NullPointerException If {@code retryOptions} is null.
*/
public RetryPolicy(RetryOptions retryOptions) {
this(ImplUtils.getRetryStrategyFromOptions(retryOptions), null, null);
}
@Override
@Override
/**
 * Synchronous pipeline entry point: buffers a non-replayable request body once (so retries can
 * resend it) and then drives the retry loop starting at attempt 0.
 */
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    HttpRequest originalHttpRequest = context.getHttpRequest();
    BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
    // Only pay for making the body replayable when retries are actually possible.
    if (retryStrategy.getMaxRetries() > 0 && originalRequestBody != null && !originalRequestBody.isReplayable()) {
        context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
    }
    return attemptSync(context, next, originalHttpRequest, 0, null);
}
/**
 * Executes one asynchronous attempt and recursively schedules the next one on a retriable
 * response or exception. {@code suppressed} accumulates the per-attempt exceptions so that the
 * final error carries every earlier failure as a suppressed throwable.
 */
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
    HttpRequest originalHttpRequest, int tryCount, List<Throwable> suppressed) {
    Mono<HttpResponse> request;
    // Expose the 1-based attempt number to the logging policy.
    context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
    if (retryStrategy.getMaxRetries() > 0 && tryCount < retryStrategy.getMaxRetries()) {
        // Another attempt may follow: work on a copy of the request and a cloned pipeline position.
        context.setHttpRequest(originalHttpRequest.copy());
        request = next.clone().process();
    } else {
        request = next.process();
    }
    return request.flatMap(httpResponse -> {
        if (shouldRetry(retryStrategy, httpResponse, tryCount)) {
            final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
                retryAfterHeader, retryAfterTimeUnit);
            logRetry(tryCount, delayDuration);
            // Release the connection of the response we are discarding before retrying.
            httpResponse.close();
            return attemptAsync(context, next, originalHttpRequest, tryCount + 1, suppressed)
                .delaySubscription(delayDuration);
        } else {
            if (tryCount >= retryStrategy.getMaxRetries()) {
                logRetryExhausted(tryCount);
            }
            return Mono.just(httpResponse);
        }
    }).onErrorResume(Exception.class, err -> {
        if (shouldRetryException(retryStrategy, err, tryCount)) {
            logRetryWithError(LOGGER.atVerbose(), tryCount, "Error resume.", err);
            // Lazily create the suppressed list on the first retriable failure.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(err);
            return attemptAsync(context, next, originalHttpRequest, tryCount + 1,
                suppressedLocal).delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
        } else {
            logRetryWithError(LOGGER.atError(), tryCount, "Retry attempts have been exhausted.", err);
            // Attach every earlier attempt's failure to the terminal error.
            if (suppressed != null) {
                suppressed.forEach(err::addSuppressed);
            }
            return Mono.error(err);
        }
    });
}
/**
 * Synchronous mirror of {@code attemptAsync}: executes one attempt, sleeps between retries, and
 * recurses for the next attempt. {@code suppressed} accumulates per-attempt exceptions so the
 * terminal error carries every earlier failure as a suppressed throwable.
 */
private HttpResponse attemptSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
    HttpRequest originalHttpRequest, int tryCount, List<Throwable> suppressed) {
    HttpResponse httpResponse;
    try {
        // Expose the 1-based attempt number to the logging policy.
        context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
        if (retryStrategy.getMaxRetries() > 0 && tryCount < retryStrategy.getMaxRetries()) {
            // Another attempt may follow: work on a copy of the request and a cloned pipeline position.
            context.setHttpRequest(originalHttpRequest.copy());
            httpResponse = next.clone().processSync();
        } else {
            httpResponse = next.processSync();
        }
    } catch (RuntimeException err) {
        if (shouldRetryException(retryStrategy, err, tryCount)) {
            logRetryWithError(LOGGER.atVerbose(), tryCount, "Error resume.", err);
            try {
                Thread.sleep(retryStrategy.calculateRetryDelay(tryCount).toMillis());
            } catch (InterruptedException ie) {
                throw LOGGER.logExceptionAsError(new RuntimeException(ie));
            }
            // Lazily create the suppressed list on the first retriable failure.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(err);
            return attemptSync(context, next, originalHttpRequest, tryCount + 1, suppressedLocal);
        } else {
            logRetryWithError(LOGGER.atError(), tryCount, "Retry attempts have been exhausted.", err);
            // Attach every earlier attempt's failure to the terminal error.
            if (suppressed != null) {
                suppressed.forEach(err::addSuppressed);
            }
            throw LOGGER.logExceptionAsError(err);
        }
    }
    if (shouldRetry(retryStrategy, httpResponse, tryCount)) {
        final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
            retryAfterHeader, retryAfterTimeUnit);
        logRetry(tryCount, delayDuration);
        // Release the connection of the response we are discarding before retrying.
        httpResponse.close();
        try {
            Thread.sleep(delayDuration.toMillis());
        } catch (InterruptedException ie) {
            throw LOGGER.logExceptionAsError(new RuntimeException(ie));
        }
        return attemptSync(context, next, originalHttpRequest, tryCount + 1, suppressed);
    } else {
        if (tryCount >= retryStrategy.getMaxRetries()) {
            logRetryExhausted(tryCount);
        }
        return httpResponse;
    }
}
/** Returns whether the response warrants another attempt, given the attempts already made. */
private static boolean shouldRetry(RetryStrategy retryStrategy, HttpResponse response, int tryCount) {
    // Check the attempt budget first so the strategy isn't consulted once retries are exhausted.
    if (tryCount >= retryStrategy.getMaxRetries()) {
        return false;
    }
    return retryStrategy.shouldRetry(response);
}
/**
 * Returns whether the thrown exception warrants another attempt. Unwraps reactive composite
 * exceptions, then walks the causal chain looking for any retriable cause.
 */
private static boolean shouldRetryException(RetryStrategy retryStrategy, Throwable throwable, int tryCount) {
    if (tryCount >= retryStrategy.getMaxRetries()) {
        return false;
    }
    for (Throwable cause = Exceptions.unwrap(throwable); cause != null; cause = cause.getCause()) {
        if (retryStrategy.shouldRetryException(cause)) {
            return true;
        }
    }
    return false;
}
/** Logs (verbose) that a retry has been scheduled, with the attempt number and delay in ms. */
private static void logRetry(int tryCount, Duration delayDuration) {
    LOGGER.atVerbose()
        .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
        .addKeyValue(LoggingKeys.DURATION_MS_KEY, delayDuration.toMillis())
        .log("Retrying.");
}
/** Logs (info) that the retry budget has been spent, with the final attempt number. */
private static void logRetryExhausted(int tryCount) {
    LOGGER.atInfo()
        .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
        .log("Retry attempts have been exhausted.");
}
/** Logs a retry-related message with the attempt number at the caller-chosen level, attaching the throwable. */
private static void logRetryWithError(LoggingEventBuilder loggingEventBuilder, int tryCount, String format,
    Throwable throwable) {
    loggingEventBuilder
        .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
        .log(format, throwable);
}
/*
* Determines the delay duration that should be waited before retrying.
*/
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
    HttpHeaderName retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
    // No configured header: fall back to the well-known retry-after headers.
    if (retryAfterHeader == null) {
        return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy, OffsetDateTime::now);
    }
    // Configured header present but empty/absent on this response: use the strategy's delay.
    String headerValue = response.getHeaderValue(retryAfterHeader);
    return isNullOrEmpty(headerValue)
        ? retryStrategy.calculateRetryDelay(tryCount)
        : Duration.of(Integer.parseInt(headerValue), retryAfterTimeUnit);
}
/*
 * Determines the delay before retrying from the well-known retry headers, deferring to the
 * retry strategy's computed delay when the headers yield none.
 */
static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy,
    Supplier<OffsetDateTime> nowSupplier) {
    Duration headerDelay = ImplUtils.getRetryAfterFromHeaders(responseHeaders, nowSupplier);
    return headerDelay != null ? headerDelay : retryStrategy.calculateRetryDelay(tryCount);
}
} | class RetryPolicy implements HttpPipelinePolicy {
private static final ClientLogger LOGGER = new ClientLogger(RetryPolicy.class);
// Strategy that decides whether to retry and computes fallback delays.
private final RetryStrategy retryStrategy;
// Response header (e.g. Retry-After) consulted for a server-provided retry delay; null means
// the strategy-computed delay / well-known headers are used instead.
private final HttpHeaderName retryAfterHeader;
// Time unit applied to the parsed retryAfterHeader value; non-null whenever retryAfterHeader is set.
private final ChronoUnit retryAfterTimeUnit;
/**
 * Creates {@link RetryPolicy} using {@link ExponentialBackoff} as the {@link RetryStrategy}, with no
 * retry-after header configured.
 */
public RetryPolicy() {
    this(new ExponentialBackoff(), null, null);
}
/**
 * Creates {@link RetryPolicy} using {@link ExponentialBackoff} as the {@link RetryStrategy}
 * and uses {@code retryAfterHeader} to look up the wait period in the returned {@link HttpResponse} to calculate
 * the retry delay when a recoverable HTTP error is returned.
 *
 * @param retryAfterHeader The HTTP header, such as {@code Retry-After} or {@code x-ms-retry-after-ms}, to lookup
 * for the retry delay. If the value is null, the {@link RetryStrategy} computes the delay
 * and ignores the delay provided in the response header.
 * @param retryAfterTimeUnit The time unit to use when applying the retry delay. Null is valid if, and only if,
 * {@code retryAfterHeader} is null.
 * @throws NullPointerException When {@code retryAfterTimeUnit} is null and {@code retryAfterHeader} is not null.
 */
public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
    this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit);
}
/**
 * Creates {@link RetryPolicy} with the provided {@link RetryStrategy}. It will use the provided
 * {@code retryAfterHeader} in {@link HttpResponse} headers for calculating the retry delay.
 *
 * @param retryStrategy The {@link RetryStrategy} used for retries.
 * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the retry
 * delay. If the value is null, {@link RetryPolicy} will use the retry strategy to compute the delay and ignore the
 * delay provided in response header.
 * @param retryAfterTimeUnit The time unit to use when applying the retry delay. null is valid if, and only if,
 * {@code retryAfterHeader} is null.
 * @throws NullPointerException If {@code retryStrategy} is null or when {@code retryAfterTimeUnit} is null and
 * {@code retryAfterHeader} is not null.
 */
public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
    this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null.");
    // NOTE(review): assumes HttpHeaderName.fromString tolerates a null input — verify.
    this.retryAfterHeader = HttpHeaderName.fromString(retryAfterHeader);
    this.retryAfterTimeUnit = retryAfterTimeUnit;
    // The time unit is mandatory only when a retry-after header was configured.
    if (!isNullOrEmpty(retryAfterHeader)) {
        Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null.");
    }
}
/**
 * Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}, with no retry-after
 * header configured.
 *
 * @param retryStrategy The {@link RetryStrategy} used for retries.
 * @throws NullPointerException If {@code retryStrategy} is null.
 */
public RetryPolicy(RetryStrategy retryStrategy) {
    this(retryStrategy, null, null);
}
/**
 * Creates a {@link RetryPolicy} with the provided {@link RetryOptions}, converting the options
 * into a {@link RetryStrategy} with no retry-after header configured.
 *
 * @param retryOptions The {@link RetryOptions} used to configure this {@link RetryPolicy}.
 * @throws NullPointerException If {@code retryOptions} is null.
 */
public RetryPolicy(RetryOptions retryOptions) {
    this(ImplUtils.getRetryStrategyFromOptions(retryOptions), null, null);
}
/**
 * Synchronously sends the request through the remainder of the pipeline, retrying failed
 * attempts according to the configured {@link RetryStrategy}.
 *
 * @param context The pipeline call context carrying the request.
 * @param next The next policy in the pipeline.
 * @return The final {@link HttpResponse} after any retries.
 */
// NOTE(review): the original had a duplicated @Override (a compile error, @Override is not
// repeatable); the async process(...) override appears to be missing from this copy — verify
// against upstream.
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    HttpRequest originalHttpRequest = context.getHttpRequest();
    BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
    // A non-replayable body must be made replayable up front so each retry can resend it.
    if (retryStrategy.getMaxRetries() > 0 && originalRequestBody != null && !originalRequestBody.isReplayable()) {
        context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
    }
    return attemptSync(context, next, originalHttpRequest, 0, null);
}
/*
 * Performs one asynchronous attempt of the request, recursively scheduling further attempts
 * while the strategy allows retrying.
 *
 * tryCount is zero-based; suppressed accumulates the error from each failed attempt so the
 * terminal error can carry them as suppressed exceptions.
 */
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
    HttpRequest originalHttpRequest, int tryCount, List<Throwable> suppressed) {
    // Record the 1-based attempt number for logging, and reset the request to a fresh copy so
    // mutations from a previous attempt don't leak into this one.
    context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
    context.setHttpRequest(originalHttpRequest.copy());
    return next.clone().process().flatMap(httpResponse -> {
        if (shouldRetry(retryStrategy, httpResponse, tryCount)) {
            final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
                retryAfterHeader, retryAfterTimeUnit);
            logRetry(tryCount, delayDuration);
            // Close the response being discarded before retrying.
            httpResponse.close();
            // delaySubscription defers the next attempt by the computed delay.
            return attemptAsync(context, next, originalHttpRequest, tryCount + 1, suppressed)
                .delaySubscription(delayDuration);
        } else {
            if (tryCount >= retryStrategy.getMaxRetries()) {
                logRetryExhausted(tryCount);
            }
            return Mono.just(httpResponse);
        }
    }).onErrorResume(Exception.class, err -> {
        if (shouldRetryException(retryStrategy, err, tryCount)) {
            logRetryWithError(LOGGER.atVerbose(), tryCount, "Error resume.", err);
            // Lazily create the suppressed list on the first error, then carry it forward.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(err);
            return attemptAsync(context, next, originalHttpRequest, tryCount + 1,
                suppressedLocal).delaySubscription(retryStrategy.calculateRetryDelay(tryCount));
        } else {
            logRetryWithError(LOGGER.atError(), tryCount, "Retry attempts have been exhausted.", err);
            // Attach every earlier per-attempt error to the terminal one before propagating it.
            if (suppressed != null) {
                suppressed.forEach(err::addSuppressed);
            }
            return Mono.error(err);
        }
    });
}
/*
 * Performs one synchronous attempt of the request, recursively retrying while the strategy
 * allows it.
 *
 * tryCount is zero-based; suppressed accumulates the error from each failed attempt so the
 * terminal error carries them as suppressed exceptions.
 */
private HttpResponse attemptSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
    HttpRequest originalHttpRequest, int tryCount, List<Throwable> suppressed) {
    HttpResponse httpResponse;
    try {
        // Record the 1-based attempt number for logging, and reset the request to a fresh copy
        // so mutations from a previous attempt don't leak into this one.
        context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, tryCount + 1);
        context.setHttpRequest(originalHttpRequest.copy());
        httpResponse = next.clone().processSync();
    } catch (RuntimeException err) {
        if (shouldRetryException(retryStrategy, err, tryCount)) {
            logRetryWithError(LOGGER.atVerbose(), tryCount, "Error resume.", err);
            try {
                Thread.sleep(retryStrategy.calculateRetryDelay(tryCount).toMillis());
            } catch (InterruptedException ie) {
                // Restore the interrupt status so callers up the stack can observe it.
                Thread.currentThread().interrupt();
                throw LOGGER.logExceptionAsError(new RuntimeException(ie));
            }
            // Lazily create the suppressed list on the first error, then carry it forward.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(err);
            return attemptSync(context, next, originalHttpRequest, tryCount + 1, suppressedLocal);
        } else {
            logRetryWithError(LOGGER.atError(), tryCount, "Retry attempts have been exhausted.", err);
            // Attach every earlier per-attempt error to the terminal one before propagating it.
            if (suppressed != null) {
                suppressed.forEach(err::addSuppressed);
            }
            throw LOGGER.logExceptionAsError(err);
        }
    }
    if (shouldRetry(retryStrategy, httpResponse, tryCount)) {
        final Duration delayDuration = determineDelayDuration(httpResponse, tryCount, retryStrategy,
            retryAfterHeader, retryAfterTimeUnit);
        logRetry(tryCount, delayDuration);
        // Close the response being discarded before retrying.
        httpResponse.close();
        try {
            Thread.sleep(delayDuration.toMillis());
        } catch (InterruptedException ie) {
            // Restore the interrupt status so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            throw LOGGER.logExceptionAsError(new RuntimeException(ie));
        }
        return attemptSync(context, next, originalHttpRequest, tryCount + 1, suppressed);
    } else {
        if (tryCount >= retryStrategy.getMaxRetries()) {
            logRetryExhausted(tryCount);
        }
        return httpResponse;
    }
}
// Retry only while attempts remain; only then consult the strategy about this response.
private static boolean shouldRetry(RetryStrategy retryStrategy, HttpResponse response, int tryCount) {
    boolean attemptsRemain = tryCount < retryStrategy.getMaxRetries();
    return attemptsRemain && retryStrategy.shouldRetry(response);
}
/*
 * Returns true when attempts remain and any throwable in the causal chain of the unwrapped
 * error is considered retryable by the strategy.
 */
private static boolean shouldRetryException(RetryStrategy retryStrategy, Throwable throwable, int tryCount) {
    boolean attemptsRemain = tryCount < retryStrategy.getMaxRetries();
    if (!attemptsRemain) {
        return false;
    }
    for (Throwable cause = Exceptions.unwrap(throwable); cause != null; cause = cause.getCause()) {
        if (retryStrategy.shouldRetryException(cause)) {
            return true;
        }
    }
    return false;
}
// Logs, at verbose level, the attempt number and the delay before the next attempt.
private static void logRetry(int tryCount, Duration delayDuration) {
    LoggingEventBuilder event = LOGGER.atVerbose()
        .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
        .addKeyValue(LoggingKeys.DURATION_MS_KEY, delayDuration.toMillis());
    event.log("Retrying.");
}
// Logs, at info level, that the retry budget for the request has been used up.
private static void logRetryExhausted(int tryCount) {
    LoggingEventBuilder event = LOGGER.atInfo().addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount);
    event.log("Retry attempts have been exhausted.");
}
// Logs the given throwable with the attempt number on the caller-supplied log-event builder.
private static void logRetryWithError(LoggingEventBuilder loggingEventBuilder, int tryCount, String format,
    Throwable throwable) {
    loggingEventBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount).log(format, throwable);
}
/*
 * Determines the delay duration that should be waited before retrying.
 *
 * When no explicit retry-after header is configured, the well-known retry headers are
 * consulted. Otherwise the configured header is parsed; if the header is absent or its
 * value is not a valid integer, the retry strategy's computed delay is used instead.
 */
static Duration determineDelayDuration(HttpResponse response, int tryCount, RetryStrategy retryStrategy,
    HttpHeaderName retryAfterHeader, ChronoUnit retryAfterTimeUnit) {
    if (retryAfterHeader == null) {
        return getWellKnownRetryDelay(response.getHeaders(), tryCount, retryStrategy, OffsetDateTime::now);
    }
    String retryHeaderValue = response.getHeaderValue(retryAfterHeader);
    if (isNullOrEmpty(retryHeaderValue)) {
        return retryStrategy.calculateRetryDelay(tryCount);
    }
    try {
        return Duration.of(Integer.parseInt(retryHeaderValue), retryAfterTimeUnit);
    } catch (NumberFormatException ignored) {
        // A malformed server-supplied header value must not abort the retry flow;
        // fall back to the strategy's delay.
        return retryStrategy.calculateRetryDelay(tryCount);
    }
}
/*
 * Determines the delay before retrying from the well-known retry headers, deferring to the
 * retry strategy's computed delay when the headers yield none.
 */
static Duration getWellKnownRetryDelay(HttpHeaders responseHeaders, int tryCount, RetryStrategy retryStrategy,
    Supplier<OffsetDateTime> nowSupplier) {
    Duration headerDelay = ImplUtils.getRetryAfterFromHeaders(responseHeaders, nowSupplier);
    return headerDelay != null ? headerDelay : retryStrategy.calculateRetryDelay(tryCount);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.